Sun Jul 26 15:30:36 2015 UTC ()
Avoid a NULL dev_priv->vlv_pctx dereference in a WARN check on the Lenovo
B50-30 by adding an additional check for a NULL dev_priv->vlv_pctx.
The system now boots (though it does not recognise any of the USB(3) ports).
pullup#7


(abs)
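
The change itself lies outside the context lines quoted below (the hunk spans
nearly 2000 lines). As a minimal sketch of the fix, assuming the guard lands in
the VLV power-context check (valleyview_check_pctx in this era of intel_pm.c;
the exact form and placement are an assumption, not the literal diff):

	/* Sketch only, not the literal diff: bail out before the WARN
	 * dereferences dev_priv->vlv_pctx, which can be NULL (seen on
	 * the Lenovo B50-30). */
	static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
	{
		unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;

		if (dev_priv->vlv_pctx == NULL)
			return;

		WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
				     dev_priv->vlv_pctx->stolen->start);
	}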
cvs diff -r1.6 -r1.7 src/sys/external/bsd/drm2/dist/drm/i915/intel_pm.c

--- src/sys/external/bsd/drm2/dist/drm/i915/intel_pm.c 2015/02/25 13:06:13 1.6
+++ src/sys/external/bsd/drm2/dist/drm/i915/intel_pm.c 2015/07/26 15:30:36 1.7
@@ -2629,1998 +2629,2000 @@ static void ilk_update_wm(struct drm_crt
	    config.num_pipes_active == 1 && config.sprites_enabled) {
		ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
		ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);

		best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
	} else {
		best_lp_wm = &lp_wm_1_2;
	}

	partitioning = (best_lp_wm == &lp_wm_1_2) ?
		       INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;

	ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);

	ilk_write_wm_values(dev_priv, &results);
}

static void ilk_update_sprite_wm(struct drm_plane *plane,
				 struct drm_crtc *crtc,
				 uint32_t sprite_width, int pixel_size,
				 bool enabled, bool scaled)
{
	struct drm_device *dev = plane->dev;
	struct intel_plane *intel_plane = to_intel_plane(plane);

	intel_plane->wm.enabled = enabled;
	intel_plane->wm.scaled = scaled;
	intel_plane->wm.horiz_pixels = sprite_width;
	intel_plane->wm.bytes_per_pixel = pixel_size;

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling. LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (IS_IVYBRIDGE(dev) && scaled && ilk_disable_lp_wm(dev))
		intel_wait_for_vblank(dev, intel_plane->pipe);

	ilk_update_wm(crtc);
}

static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct ilk_wm_values *hw = &dev_priv->wm.hw;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_pipe_wm *active = &intel_crtc->wm.active;
	enum i915_pipe pipe = intel_crtc->pipe;
	static const unsigned int wm0_pipe_reg[] = {
		[PIPE_A] = WM0_PIPEA_ILK,
		[PIPE_B] = WM0_PIPEB_ILK,
		[PIPE_C] = WM0_PIPEC_IVB,
	};

	hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));

	if (intel_crtc_active(crtc)) {
		u32 tmp = hw->wm_pipe[pipe];

		/*
		 * For active pipes LP0 watermark is marked as
		 * enabled, and LP1+ watermarks as disabled since
		 * we can't really reverse compute them in case
		 * multiple pipes are active.
		 */
		active->wm[0].enable = true;
		active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
		active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
		active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
		active->linetime = hw->wm_linetime[pipe];
	} else {
		int level, max_level = ilk_wm_max_level(dev);

		/*
		 * For inactive pipes, all watermark levels
		 * should be marked as enabled but zeroed,
		 * which is what we'd compute them to.
		 */
		for (level = 0; level <= max_level; level++)
			active->wm[level].enable = true;
	}
}

void ilk_wm_get_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct ilk_wm_values *hw = &dev_priv->wm.hw;
	struct drm_crtc *crtc;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		ilk_pipe_wm_get_hw_state(crtc);

	hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
	hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
	hw->wm_lp[2] = I915_READ(WM3_LP_ILK);

	hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
	hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
	hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
	else if (IS_IVYBRIDGE(dev))
		hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;

	hw->enable_fbc_wm =
		!(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
}

/**
 * intel_update_watermarks - update FIFO watermark values based on current modes
 *
 * Calculate watermark values for the various WM regs based on current mode
 * and plane configuration.
 *
 * There are several cases to deal with here:
 *   - normal (i.e. non-self-refresh)
 *   - self-refresh (SR) mode
 *   - lines are large relative to FIFO size (buffer can hold up to 2)
 *   - lines are small relative to FIFO size (buffer can hold more than 2
 *     lines), so need to account for TLB latency
 *
 *   The normal calculation is:
 *     watermark = dotclock * bytes per pixel * latency
 *   where latency is platform & configuration dependent (we assume pessimal
 *   values here).
 *
 *   The SR calculation is:
 *     watermark = (trunc(latency/line time)+1) * surface width *
 *       bytes per pixel
 *   where
 *     line time = htotal / dotclock
 *     surface width = hdisplay for normal plane and 64 for cursor
 *   and latency is assumed to be high, as above.
 *
 * The final value programmed to the register should always be rounded up,
 * and include an extra 2 entries to account for clock crossings.
 *
 * We don't use the sprite, so we can ignore that. And on Crestline we have
 * to set the non-SR watermarks to 8.
 */
void intel_update_watermarks(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;

	if (dev_priv->display.update_wm)
		dev_priv->display.update_wm(crtc);
}

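To make the SR formula in the comment above concrete, a small worked example
with hypothetical numbers (none of these values appear in the diff):

	/*
	 * Hypothetical SR watermark: 1920x1080, htotal 2200, 148.5 MHz
	 * dotclock, 4 bytes per pixel, and an assumed 30 us latency.
	 */
	static unsigned int example_sr_watermark(void)
	{
		unsigned int htotal = 2200, hdisplay = 1920, bpp = 4;
		unsigned int dotclock_khz = 148500;
		unsigned int latency_ns = 30000;	/* assumed pessimal */
		/* line time = htotal / dotclock ~= 14814 ns */
		unsigned int line_time_ns = htotal * 1000000 / dotclock_khz;

		/* (trunc(latency/line time) + 1) * surface width * bpp */
		return (latency_ns / line_time_ns + 1) * hdisplay * bpp; /* 23040 */
	}
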
void intel_update_sprite_watermarks(struct drm_plane *plane,
				    struct drm_crtc *crtc,
				    uint32_t sprite_width, int pixel_size,
				    bool enabled, bool scaled)
{
	struct drm_i915_private *dev_priv = plane->dev->dev_private;

	if (dev_priv->display.update_sprite_wm)
		dev_priv->display.update_sprite_wm(plane, crtc, sprite_width,
						   pixel_size, enabled, scaled);
}

static struct drm_i915_gem_object *
intel_alloc_context_page(struct drm_device *dev)
{
	struct drm_i915_gem_object *ctx;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	ctx = i915_gem_alloc_object(dev, 4096);
	if (!ctx) {
		DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
		return NULL;
	}

	ret = i915_gem_obj_ggtt_pin(ctx, 4096, 0);
	if (ret) {
		DRM_ERROR("failed to pin power context: %d\n", ret);
		goto err_unref;
	}

	ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
	if (ret) {
		DRM_ERROR("failed to set-domain on power context: %d\n", ret);
		goto err_unpin;
	}

	return ctx;

err_unpin:
	i915_gem_object_ggtt_unpin(ctx);
err_unref:
	drm_gem_object_unreference(&ctx->base);
	return NULL;
}

/**
 * Lock protecting IPS related data structures
 */
#ifdef __NetBSD__
spinlock_t mchdev_lock;
#else
DEFINE_SPINLOCK(mchdev_lock);
#endif

/* Global for IPS driver to get at the current i915 device. Protected by
 * mchdev_lock. */
static struct drm_i915_private *i915_mch_dev;

bool ironlake_set_drps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 rgvswctl;

	assert_spin_locked(&mchdev_lock);

	rgvswctl = I915_READ16(MEMSWCTL);
	if (rgvswctl & MEMCTL_CMD_STS) {
		DRM_DEBUG("gpu busy, RCS change rejected\n");
		return false; /* still busy with another command */
	}

	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		(val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
	I915_WRITE16(MEMSWCTL, rgvswctl);
	POSTING_READ16(MEMSWCTL);

	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE16(MEMSWCTL, rgvswctl);

	return true;
}

static void ironlake_enable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rgvmodectl = I915_READ(MEMMODECTL);
	u8 fmax, fmin, fstart, vstart;

	spin_lock_irq(&mchdev_lock);

	/* Enable temp reporting */
	I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
	I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);

	/* 100ms RC evaluation intervals */
	I915_WRITE(RCUPEI, 100000);
	I915_WRITE(RCDNEI, 100000);

	/* Set max/min thresholds to 90ms and 80ms respectively */
	I915_WRITE(RCBMAXAVG, 90000);
	I915_WRITE(RCBMINAVG, 80000);

	I915_WRITE(MEMIHYST, 1);

	/* Set up min, max, and cur for interrupt handling */
	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;

	vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
		PXVFREQ_PX_SHIFT;

	dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
	dev_priv->ips.fstart = fstart;

	dev_priv->ips.max_delay = fstart;
	dev_priv->ips.min_delay = fmin;
	dev_priv->ips.cur_delay = fstart;

	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
			 fmax, fmin, fstart);

	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);

	/*
	 * Interrupts will be enabled in ironlake_irq_postinstall
	 */

	I915_WRITE(VIDSTART, vstart);
	POSTING_READ(VIDSTART);

	rgvmodectl |= MEMMODE_SWMODE_EN;
	I915_WRITE(MEMMODECTL, rgvmodectl);

	if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
		DRM_ERROR("stuck trying to change perf mode\n");
	mdelay(1);

	ironlake_set_drps(dev, fstart);

	dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
		I915_READ(0x112e0);
	dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
	dev_priv->ips.last_count2 = I915_READ(0x112f4);
	getrawmonotonic(&dev_priv->ips.last_time2);

	spin_unlock_irq(&mchdev_lock);
}

static void ironlake_disable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 rgvswctl;

	spin_lock_irq(&mchdev_lock);

	rgvswctl = I915_READ16(MEMSWCTL);

	/* Ack interrupts, disable EFC interrupt */
	I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
	I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
	I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
	I915_WRITE(DEIIR, DE_PCU_EVENT);
	I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);

	/* Go back to the starting frequency */
	ironlake_set_drps(dev, dev_priv->ips.fstart);
	mdelay(1);
	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE(MEMSWCTL, rgvswctl);
	mdelay(1);

	spin_unlock_irq(&mchdev_lock);
}

/* There's a funny hw issue where the hw returns all 0 when reading from
 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
 * ourselves, instead of doing a rmw cycle (which might result in us clearing
 * all limits and the gpu stuck at whatever frequency it is at atm).
 */
static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val)
{
	u32 limits;

	/* Only set the down limit when we've reached the lowest level to avoid
	 * getting more interrupts, otherwise leave this clear. This prevents a
	 * race in the hw when coming out of rc6: There's a tiny window where
	 * the hw runs at the minimal clock before selecting the desired
	 * frequency, if the down threshold expires in that window we will not
	 * receive a down interrupt. */
	limits = dev_priv->rps.max_freq_softlimit << 24;
	if (val <= dev_priv->rps.min_freq_softlimit)
		limits |= dev_priv->rps.min_freq_softlimit << 16;

	return limits;
}

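The quirk described above is why gen6_rps_limits() rebuilds the register value
from scratch; for illustration (hypothetical code, not part of the driver), a
read-modify-write cycle would go wrong like this:

	/* Hypothetical rmw cycle, to illustrate the quirk noted above: */
	u32 limits = I915_READ(GEN6_RP_INTERRUPT_LIMITS);	/* hw returns 0 */
	limits |= dev_priv->rps.min_freq_softlimit << 16;	/* down limit */
	/* The up limit that belongs in bits 31:24 is now zero, so writing
	 * this back clears all limits and can leave the gpu stuck at
	 * whatever frequency it is at. */
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
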
static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
{
	int new_power;

	new_power = dev_priv->rps.power;
	switch (dev_priv->rps.power) {
	case LOW_POWER:
		if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq)
			new_power = BETWEEN;
		break;

	case BETWEEN:
		if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq)
			new_power = LOW_POWER;
		else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq)
			new_power = HIGH_POWER;
		break;

	case HIGH_POWER:
		if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq)
			new_power = BETWEEN;
		break;
	}
	/* Max/min bins are special */
	if (val == dev_priv->rps.min_freq_softlimit)
		new_power = LOW_POWER;
	if (val == dev_priv->rps.max_freq_softlimit)
		new_power = HIGH_POWER;
	if (new_power == dev_priv->rps.power)
		return;

	/* Note the units here are not exactly 1us, but 1280ns. */
	switch (new_power) {
	case LOW_POWER:
		/* Upclock if more than 95% busy over 16ms */
		I915_WRITE(GEN6_RP_UP_EI, 12500);
		I915_WRITE(GEN6_RP_UP_THRESHOLD, 11800);

		/* Downclock if less than 85% busy over 32ms */
		I915_WRITE(GEN6_RP_DOWN_EI, 25000);
		I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 21250);

		I915_WRITE(GEN6_RP_CONTROL,
			   GEN6_RP_MEDIA_TURBO |
			   GEN6_RP_MEDIA_HW_NORMAL_MODE |
			   GEN6_RP_MEDIA_IS_GFX |
			   GEN6_RP_ENABLE |
			   GEN6_RP_UP_BUSY_AVG |
			   GEN6_RP_DOWN_IDLE_AVG);
		break;

	case BETWEEN:
		/* Upclock if more than 90% busy over 13ms */
		I915_WRITE(GEN6_RP_UP_EI, 10250);
		I915_WRITE(GEN6_RP_UP_THRESHOLD, 9225);

		/* Downclock if less than 75% busy over 32ms */
		I915_WRITE(GEN6_RP_DOWN_EI, 25000);
		I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 18750);

		I915_WRITE(GEN6_RP_CONTROL,
			   GEN6_RP_MEDIA_TURBO |
			   GEN6_RP_MEDIA_HW_NORMAL_MODE |
			   GEN6_RP_MEDIA_IS_GFX |
			   GEN6_RP_ENABLE |
			   GEN6_RP_UP_BUSY_AVG |
			   GEN6_RP_DOWN_IDLE_AVG);
		break;

	case HIGH_POWER:
		/* Upclock if more than 85% busy over 10ms */
		I915_WRITE(GEN6_RP_UP_EI, 8000);
		I915_WRITE(GEN6_RP_UP_THRESHOLD, 6800);

		/* Downclock if less than 60% busy over 32ms */
		I915_WRITE(GEN6_RP_DOWN_EI, 25000);
		I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 15000);

		I915_WRITE(GEN6_RP_CONTROL,
			   GEN6_RP_MEDIA_TURBO |
			   GEN6_RP_MEDIA_HW_NORMAL_MODE |
			   GEN6_RP_MEDIA_IS_GFX |
			   GEN6_RP_ENABLE |
			   GEN6_RP_UP_BUSY_AVG |
			   GEN6_RP_DOWN_IDLE_AVG);
		break;
	}

	dev_priv->rps.power = new_power;
	dev_priv->rps.last_adj = 0;
}

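The 1280 ns unit mentioned above is what reconciles the raw register values
with the busyness comments; worked out for the LOW_POWER case:

	/*
	 * GEN6_RP_UP_EI          12500 * 1280 ns = 16.0 ms window
	 * GEN6_RP_UP_THRESHOLD   11800 / 12500   = 94.4% busy (~"95%")
	 * GEN6_RP_DOWN_EI        25000 * 1280 ns = 32.0 ms window
	 * GEN6_RP_DOWN_THRESHOLD 21250 / 25000   = 85% busy
	 */
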
static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
{
	u32 mask = 0;

	if (val > dev_priv->rps.min_freq_softlimit)
		mask |= GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
	if (val < dev_priv->rps.max_freq_softlimit)
		mask |= GEN6_PM_RP_UP_THRESHOLD;

	/* IVB and SNB hard hang on looping batchbuffer
	 * if GEN6_PM_UP_EI_EXPIRED is masked.
	 */
	if (INTEL_INFO(dev_priv->dev)->gen <= 7 && !IS_HASWELL(dev_priv->dev))
		mask |= GEN6_PM_RP_UP_EI_EXPIRED;

	return ~mask;
}

/* gen6_set_rps is called to update the frequency request, but should also be
 * called when the range (min_delay and max_delay) is modified so that we can
 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
void gen6_set_rps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
	WARN_ON(val > dev_priv->rps.max_freq_softlimit);
	WARN_ON(val < dev_priv->rps.min_freq_softlimit);

	/* min/max delay may still have been modified so be sure to
	 * write the limits value.
	 */
	if (val != dev_priv->rps.cur_freq) {
		gen6_set_rps_thresholds(dev_priv, val);

		if (IS_HASWELL(dev))
			I915_WRITE(GEN6_RPNSWREQ,
				   HSW_FREQUENCY(val));
		else
			I915_WRITE(GEN6_RPNSWREQ,
				   GEN6_FREQUENCY(val) |
				   GEN6_OFFSET(0) |
				   GEN6_AGGRESSIVE_TURBO);
	}

	/* Make sure we continue to get interrupts
	 * until we hit the minimum or maximum frequencies.
	 */
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, gen6_rps_limits(dev_priv, val));
	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));

	POSTING_READ(GEN6_RPNSWREQ);

	dev_priv->rps.cur_freq = val;
	trace_intel_gpu_freq_change(val * 50);
}

/* vlv_set_rps_idle: Set the frequency to Rpn if Gfx clocks are down
 *
 * * If Gfx is Idle, then
 * 1. Mask Turbo interrupts
 * 2. Bring up Gfx clock
 * 3. Change the freq to Rpn and wait till P-Unit updates freq
 * 4. Clear the Force GFX CLK ON bit so that Gfx can clock down
 * 5. Unmask Turbo interrupts
 */
static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
{
	/*
	 * When we are idle, drop to the min voltage state.
	 */

	if (dev_priv->rps.cur_freq <= dev_priv->rps.min_freq_softlimit)
		return;

	/* Mask turbo interrupts so that they will not come in between */
	I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);

	/* Bring up the Gfx clock */
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG,
		   I915_READ(VLV_GTLC_SURVIVABILITY_REG) |
		   VLV_GFX_CLK_FORCE_ON_BIT);

	if (wait_for(((VLV_GFX_CLK_STATUS_BIT &
		      I915_READ(VLV_GTLC_SURVIVABILITY_REG)) != 0), 5)) {
		DRM_ERROR("GFX_CLK_ON request timed out\n");
		return;
	}

	dev_priv->rps.cur_freq = dev_priv->rps.min_freq_softlimit;

	vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ,
			dev_priv->rps.min_freq_softlimit);

	if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS))
		      & GENFREQSTATUS) == 0, 5))
		DRM_ERROR("timed out waiting for Punit\n");

	/* Release the Gfx clock */
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG,
		   I915_READ(VLV_GTLC_SURVIVABILITY_REG) &
		   ~VLV_GFX_CLK_FORCE_ON_BIT);

	I915_WRITE(GEN6_PMINTRMSK,
		   gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
}

void gen6_rps_idle(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	mutex_lock(&dev_priv->rps.hw_lock);
	if (dev_priv->rps.enabled) {
		if (IS_VALLEYVIEW(dev))
			vlv_set_rps_idle(dev_priv);
		else
			gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
		dev_priv->rps.last_adj = 0;
	}
	mutex_unlock(&dev_priv->rps.hw_lock);
}

void gen6_rps_boost(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	mutex_lock(&dev_priv->rps.hw_lock);
	if (dev_priv->rps.enabled) {
		if (IS_VALLEYVIEW(dev))
			valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
		else
			gen6_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
		dev_priv->rps.last_adj = 0;
	}
	mutex_unlock(&dev_priv->rps.hw_lock);
}

void valleyview_set_rps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
	WARN_ON(val > dev_priv->rps.max_freq_softlimit);
	WARN_ON(val < dev_priv->rps.min_freq_softlimit);

	DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
			 dev_priv->rps.cur_freq,
			 vlv_gpu_freq(dev_priv, val), val);

	if (val != dev_priv->rps.cur_freq)
		vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);

	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));

	dev_priv->rps.cur_freq = val;
	trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
}

static void gen6_disable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
	I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) &
				~dev_priv->pm_rps_events);
	/* Complete PM interrupt masking here doesn't race with the rps work
	 * item again unmasking PM interrupts because that is using a different
	 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
	 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
}

static void gen6_disable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN6_RC_CONTROL, 0);
	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);

	gen6_disable_rps_interrupts(dev);
}

static void valleyview_disable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN6_RC_CONTROL, 0);

	gen6_disable_rps_interrupts(dev);
}

static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
{
	DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
		 (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
		 (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
		 (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
}

int intel_enable_rc6(const struct drm_device *dev)
{
	/* No RC6 before Ironlake */
	if (INTEL_INFO(dev)->gen < 5)
		return 0;

	/* Respect the kernel parameter if it is set */
	if (i915.enable_rc6 >= 0)
		return i915.enable_rc6;

	/* Disable RC6 on Ironlake */
	if (INTEL_INFO(dev)->gen == 5)
		return 0;

	if (IS_IVYBRIDGE(dev))
		return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);

	return INTEL_RC6_ENABLE;
}

static void gen6_enable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);
	WARN_ON(dev_priv->rps.pm_iir);
	snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void gen8_enable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	uint32_t rc6_mask = 0;
	int unused;

	/* 1a: Software RC state - RC0 */
	I915_WRITE(GEN6_RC_STATE, 0);

	/* 1c & 1d: Get forcewake during program sequence. Although the driver
	 * hasn't enabled a state yet where we need forcewake, BIOS may have. */
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

	/* 2a: Disable RC states. */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	(void)I915_READ(GEN6_RP_STATE_CAP);

	/* 2b: Program RC6 thresholds. */
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
	for_each_ring(ring, dev_priv, unused)
		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
	I915_WRITE(GEN6_RC_SLEEP, 0);
	I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */

	/* 3: Enable RC6 */
	if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
		rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
	intel_print_rc6_info(dev, rc6_mask);
	I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
			GEN6_RC_CTL_EI_MODE(1) |
			rc6_mask);

	/* 4: Program defaults and thresholds for RPS */
	I915_WRITE(GEN6_RPNSWREQ, HSW_FREQUENCY(10)); /* Request 500 MHz */
	I915_WRITE(GEN6_RC_VIDEO_FREQ, HSW_FREQUENCY(12)); /* Request 600 MHz */
	/* NB: Docs say 1s, and 1000000 - which aren't equivalent */
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */

	/* Docs recommend 900MHz, and 300 MHz respectively */
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
		   dev_priv->rps.max_freq_softlimit << 24 |
		   dev_priv->rps.min_freq_softlimit << 16);

	I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70% */
	I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
	I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	/* 5: Enable RPS */
	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_AVG);

	/* 6: Ring frequency + overclocking (our driver does this later) */

	gen6_set_rps(dev, (I915_READ(GEN6_GT_PERF_STATUS) & 0xff00) >> 8);

	gen6_enable_rps_interrupts(dev);

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}

3385static void gen6_enable_rps(struct drm_device *dev) 3385static void gen6_enable_rps(struct drm_device *dev)
3386{ 3386{
3387 struct drm_i915_private *dev_priv = dev->dev_private; 3387 struct drm_i915_private *dev_priv = dev->dev_private;
3388 struct intel_ring_buffer *ring; 3388 struct intel_ring_buffer *ring;
3389 u32 rp_state_cap; 3389 u32 rp_state_cap;
3390 u32 rc6vids, pcu_mbox = 0, rc6_mask = 0; 3390 u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
3391 u32 gtfifodbg; 3391 u32 gtfifodbg;
3392 int rc6_mode; 3392 int rc6_mode;
3393 int i, ret; 3393 int i, ret;
3394 3394
3395 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 3395 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3396 3396
3397 /* Here begins a magic sequence of register writes to enable 3397 /* Here begins a magic sequence of register writes to enable
3398 * auto-downclocking. 3398 * auto-downclocking.
3399 * 3399 *
3400 * Perhaps there might be some value in exposing these to 3400 * Perhaps there might be some value in exposing these to
3401 * userspace... 3401 * userspace...
3402 */ 3402 */
3403 I915_WRITE(GEN6_RC_STATE, 0); 3403 I915_WRITE(GEN6_RC_STATE, 0);
3404 3404
3405 /* Clear the DBG now so we don't confuse earlier errors */ 3405 /* Clear the DBG now so we don't confuse earlier errors */
3406 if ((gtfifodbg = I915_READ(GTFIFODBG))) { 3406 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
3407 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg); 3407 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
3408 I915_WRITE(GTFIFODBG, gtfifodbg); 3408 I915_WRITE(GTFIFODBG, gtfifodbg);
3409 } 3409 }
3410 3410
3411 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); 3411 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
3412 3412
3413 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 3413 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
3414 (void)I915_READ(GEN6_GT_PERF_STATUS); 3414 (void)I915_READ(GEN6_GT_PERF_STATUS);
3415 3415
3416 /* All of these values are in units of 50MHz */ 3416 /* All of these values are in units of 50MHz */
3417 dev_priv->rps.cur_freq = 0; 3417 dev_priv->rps.cur_freq = 0;
3418 /* static values from HW: RP0 < RPe < RP1 < RPn (min_freq) */ 3418 /* static values from HW: RP0 < RPe < RP1 < RPn (min_freq) */
3419 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff; 3419 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
3420 dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff; 3420 dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
3421 dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff; 3421 dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
3422 /* XXX: only BYT has a special efficient freq */ 3422 /* XXX: only BYT has a special efficient freq */
3423 dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq; 3423 dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
3424 /* hw_max = RP0 until we check for overclocking */ 3424 /* hw_max = RP0 until we check for overclocking */
3425 dev_priv->rps.max_freq = dev_priv->rps.rp0_freq; 3425 dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
3426 3426
3427 /* Preserve min/max settings in case of re-init */ 3427 /* Preserve min/max settings in case of re-init */
3428 if (dev_priv->rps.max_freq_softlimit == 0) 3428 if (dev_priv->rps.max_freq_softlimit == 0)
3429 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq; 3429 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
3430 3430
3431 if (dev_priv->rps.min_freq_softlimit == 0) 3431 if (dev_priv->rps.min_freq_softlimit == 0)
3432 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq; 3432 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
3433 3433
3434 /* disable the counters and set deterministic thresholds */ 3434 /* disable the counters and set deterministic thresholds */
3435 I915_WRITE(GEN6_RC_CONTROL, 0); 3435 I915_WRITE(GEN6_RC_CONTROL, 0);
3436 3436
3437 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16); 3437 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
3438 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30); 3438 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
3439 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30); 3439 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
3440 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); 3440 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
3441 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); 3441 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
3442 3442
3443 for_each_ring(ring, dev_priv, i) 3443 for_each_ring(ring, dev_priv, i)
3444 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10); 3444 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
3445 3445
3446 I915_WRITE(GEN6_RC_SLEEP, 0); 3446 I915_WRITE(GEN6_RC_SLEEP, 0);
3447 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000); 3447 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
3448 if (IS_IVYBRIDGE(dev)) 3448 if (IS_IVYBRIDGE(dev))
3449 I915_WRITE(GEN6_RC6_THRESHOLD, 125000); 3449 I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
3450 else 3450 else
3451 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); 3451 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
3452 I915_WRITE(GEN6_RC6p_THRESHOLD, 150000); 3452 I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
3453 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */ 3453 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
3454 3454
3455 /* Check if we are enabling RC6 */ 3455 /* Check if we are enabling RC6 */
3456 rc6_mode = intel_enable_rc6(dev_priv->dev); 3456 rc6_mode = intel_enable_rc6(dev_priv->dev);
3457 if (rc6_mode & INTEL_RC6_ENABLE) 3457 if (rc6_mode & INTEL_RC6_ENABLE)
3458 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE; 3458 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
3459 3459
3460 /* We don't use those on Haswell */ 3460 /* We don't use those on Haswell */
3461 if (!IS_HASWELL(dev)) { 3461 if (!IS_HASWELL(dev)) {
3462 if (rc6_mode & INTEL_RC6p_ENABLE) 3462 if (rc6_mode & INTEL_RC6p_ENABLE)
3463 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE; 3463 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
3464 3464
3465 if (rc6_mode & INTEL_RC6pp_ENABLE) 3465 if (rc6_mode & INTEL_RC6pp_ENABLE)
3466 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE; 3466 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
3467 } 3467 }
3468 3468
3469 intel_print_rc6_info(dev, rc6_mask); 3469 intel_print_rc6_info(dev, rc6_mask);
3470 3470
3471 I915_WRITE(GEN6_RC_CONTROL, 3471 I915_WRITE(GEN6_RC_CONTROL,
3472 rc6_mask | 3472 rc6_mask |
3473 GEN6_RC_CTL_EI_MODE(1) | 3473 GEN6_RC_CTL_EI_MODE(1) |
3474 GEN6_RC_CTL_HW_ENABLE); 3474 GEN6_RC_CTL_HW_ENABLE);
3475 3475
3476 /* Power down if completely idle for over 50ms */ 3476 /* Power down if completely idle for over 50ms */
3477 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000); 3477 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
3478 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); 3478 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
3479 3479
3480 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0); 3480 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
3481 if (ret) 3481 if (ret)
3482 DRM_DEBUG_DRIVER("Failed to set the min frequency\n"); 3482 DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
3483 3483
3484 ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox); 3484 ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
3485 if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */ 3485 if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
3486 DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n", 3486 DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
3487 (dev_priv->rps.max_freq_softlimit & 0xff) * 50, 3487 (dev_priv->rps.max_freq_softlimit & 0xff) * 50,
3488 (pcu_mbox & 0xff) * 50); 3488 (pcu_mbox & 0xff) * 50);
3489 dev_priv->rps.max_freq = pcu_mbox & 0xff; 3489 dev_priv->rps.max_freq = pcu_mbox & 0xff;
3490 } 3490 }
3491 3491
3492 dev_priv->rps.power = HIGH_POWER; /* force a reset */ 3492 dev_priv->rps.power = HIGH_POWER; /* force a reset */
3493 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit); 3493 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
3494 3494
3495 gen6_enable_rps_interrupts(dev); 3495 gen6_enable_rps_interrupts(dev);
3496 3496
3497 rc6vids = 0; 3497 rc6vids = 0;
3498 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); 3498 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
3499 if (IS_GEN6(dev) && ret) { 3499 if (IS_GEN6(dev) && ret) {
3500 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n"); 3500 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
3501 } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) { 3501 } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
3502 DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n", 3502 DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
3503 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450); 3503 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
3504 rc6vids &= 0xffff00; 3504 rc6vids &= 0xffff00;
3505 rc6vids |= GEN6_ENCODE_RC6_VID(450); 3505 rc6vids |= GEN6_ENCODE_RC6_VID(450);
3506 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids); 3506 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
3507 if (ret) 3507 if (ret)
3508 DRM_ERROR("Couldn't fix incorrect rc6 voltage\n"); 3508 DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
3509 } 3509 }
3510 3510
3511 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL); 3511 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
3512} 3512}
3513 3513
3514void gen6_update_ring_freq(struct drm_device *dev) 3514void gen6_update_ring_freq(struct drm_device *dev)
3515{ 3515{
3516 struct drm_i915_private *dev_priv = dev->dev_private; 3516 struct drm_i915_private *dev_priv = dev->dev_private;
3517 int min_freq = 15; 3517 int min_freq = 15;
3518 unsigned int gpu_freq; 3518 unsigned int gpu_freq;
3519 unsigned int max_ia_freq, min_ring_freq; 3519 unsigned int max_ia_freq, min_ring_freq;
3520 int scaling_factor = 180; 3520 int scaling_factor = 180;
3521#ifndef __NetBSD__ 3521#ifndef __NetBSD__
3522 struct cpufreq_policy *policy; 3522 struct cpufreq_policy *policy;
3523#endif 3523#endif
3524 3524
3525 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 3525 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3526 3526
3527#ifdef __NetBSD__ 3527#ifdef __NetBSD__
3528 { 3528 {
3529 extern uint64_t tsc_freq; /* x86 TSC frequency in Hz */ 3529 extern uint64_t tsc_freq; /* x86 TSC frequency in Hz */
3530 max_ia_freq = (tsc_freq / 1000); 3530 max_ia_freq = (tsc_freq / 1000);
3531 } 3531 }
3532#else 3532#else
3533 policy = cpufreq_cpu_get(0); 3533 policy = cpufreq_cpu_get(0);
3534 if (policy) { 3534 if (policy) {
3535 max_ia_freq = policy->cpuinfo.max_freq; 3535 max_ia_freq = policy->cpuinfo.max_freq;
3536 cpufreq_cpu_put(policy); 3536 cpufreq_cpu_put(policy);
3537 } else { 3537 } else {
3538 /* 3538 /*
3539 * Default to measured freq if none found, PCU will ensure we 3539 * Default to measured freq if none found, PCU will ensure we
3540 * don't go over 3540 * don't go over
3541 */ 3541 */
3542 max_ia_freq = tsc_khz; 3542 max_ia_freq = tsc_khz;
3543 } 3543 }
3544#endif 3544#endif
3545 3545
3546 /* Convert from kHz to MHz */ 3546 /* Convert from kHz to MHz */
3547 max_ia_freq /= 1000; 3547 max_ia_freq /= 1000;
3548 3548
3549 min_ring_freq = I915_READ(DCLK) & 0xf; 3549 min_ring_freq = I915_READ(DCLK) & 0xf;
3550 /* convert DDR frequency from units of 266.6MHz to bandwidth */ 3550 /* convert DDR frequency from units of 266.6MHz to bandwidth */
3551 min_ring_freq = mult_frac(min_ring_freq, 8, 3); 3551 min_ring_freq = mult_frac(min_ring_freq, 8, 3);
3552 3552
3553 /* 3553 /*
3554 * For each potential GPU frequency, load a ring frequency we'd like 3554 * For each potential GPU frequency, load a ring frequency we'd like
3555 * to use for memory access. We do this by specifying the IA frequency 3555 * to use for memory access. We do this by specifying the IA frequency
3556 * the PCU should use as a reference to determine the ring frequency. 3556 * the PCU should use as a reference to determine the ring frequency.
3557 */ 3557 */
3558 for (gpu_freq = dev_priv->rps.max_freq_softlimit; gpu_freq >= dev_priv->rps.min_freq_softlimit; 3558 for (gpu_freq = dev_priv->rps.max_freq_softlimit; gpu_freq >= dev_priv->rps.min_freq_softlimit;
3559 gpu_freq--) { 3559 gpu_freq--) {
3560 int diff = dev_priv->rps.max_freq_softlimit - gpu_freq; 3560 int diff = dev_priv->rps.max_freq_softlimit - gpu_freq;
3561 unsigned int ia_freq = 0, ring_freq = 0; 3561 unsigned int ia_freq = 0, ring_freq = 0;
3562 3562
3563 if (INTEL_INFO(dev)->gen >= 8) { 3563 if (INTEL_INFO(dev)->gen >= 8) {
3564 /* max(2 * GT, DDR). NB: GT is 50MHz units */ 3564 /* max(2 * GT, DDR). NB: GT is 50MHz units */
3565 ring_freq = max(min_ring_freq, gpu_freq); 3565 ring_freq = max(min_ring_freq, gpu_freq);
3566 } else if (IS_HASWELL(dev)) { 3566 } else if (IS_HASWELL(dev)) {
3567 ring_freq = mult_frac(gpu_freq, 5, 4); 3567 ring_freq = mult_frac(gpu_freq, 5, 4);
3568 ring_freq = max(min_ring_freq, ring_freq); 3568 ring_freq = max(min_ring_freq, ring_freq);
3569 /* leave ia_freq as the default, chosen by cpufreq */ 3569 /* leave ia_freq as the default, chosen by cpufreq */
3570 } else { 3570 } else {
3571 /* On older processors, there is no separate ring 3571 /* On older processors, there is no separate ring
3572 * clock domain, so in order to boost the bandwidth 3572 * clock domain, so in order to boost the bandwidth
3573 * of the ring, we need to upclock the CPU (ia_freq). 3573 * of the ring, we need to upclock the CPU (ia_freq).
3574 * 3574 *
3575 * For GPU frequencies less than 750MHz, 3575 * For GPU frequencies less than 750MHz,
3576 * just use the lowest ring freq. 3576 * just use the lowest ring freq.
3577 */ 3577 */
3578 if (gpu_freq < min_freq) 3578 if (gpu_freq < min_freq)
3579 ia_freq = 800; 3579 ia_freq = 800;
3580 else 3580 else
3581 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2); 3581 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
3582 ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100); 3582 ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
3583 } 3583 }
3584 3584
3585 sandybridge_pcode_write(dev_priv, 3585 sandybridge_pcode_write(dev_priv,
3586 GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 3586 GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
3587 ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT | 3587 ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
3588 ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT | 3588 ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
3589 gpu_freq); 3589 gpu_freq);
3590 } 3590 }
3591} 3591}
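
gen6_update_ring_freq() programs one PCODE table entry per GPU frequency step; the interesting arithmetic is the pre-Haswell branch, which derives an IA frequency ratio from how far below the software maximum the GPU step sits. A minimal sketch of just that mapping, with hypothetical inputs (a 2700MHz CPU and GT steps in 50MHz units, as in the loop above):

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

/*
 * Pre-Haswell mapping from a GPU frequency step (50MHz units) to the
 * IA ratio handed to the PCU, mirroring the loop body above:
 * min_freq = 15 corresponds to 750MHz, scaling_factor is 180.
 */
static unsigned int ia_ratio(unsigned int gpu_freq, unsigned int max_gpu_freq,
    unsigned int max_ia_freq /* MHz */)
{
    const unsigned int min_freq = 15, scaling_factor = 180;
    unsigned int diff = max_gpu_freq - gpu_freq;
    unsigned int ia_freq;

    if (gpu_freq < min_freq)
        ia_freq = 800;
    else
        ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
    return DIV_ROUND_CLOSEST(ia_freq, 100);
}

int main(void)
{
    unsigned int g;

    /* hypothetical: 2700MHz CPU, GT soft limits 7..22 (350-1100MHz) */
    for (g = 22; g >= 7; g--)
        printf("gpu step %u -> ia ratio %u\n", g, ia_ratio(g, 22, 2700));
    return 0;
}
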
3592 3592
3593int valleyview_rps_max_freq(struct drm_i915_private *dev_priv) 3593int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
3594{ 3594{
3595 u32 val, rp0; 3595 u32 val, rp0;
3596 3596
3597 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE); 3597 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
3598 3598
3599 rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT; 3599 rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
3600 /* Clamp to max */ 3600 /* Clamp to max */
3601 rp0 = min_t(u32, rp0, 0xea); 3601 rp0 = min_t(u32, rp0, 0xea);
3602 3602
3603 return rp0; 3603 return rp0;
3604} 3604}
3605 3605
3606static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv) 3606static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
3607{ 3607{
3608 u32 val, rpe; 3608 u32 val, rpe;
3609 3609
3610 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO); 3610 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
3611 rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT; 3611 rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
3612 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI); 3612 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
3613 rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5; 3613 rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
3614 3614
3615 return rpe; 3615 return rpe;
3616} 3616}
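
valleyview_rps_rpe_freq() stitches the 8-bit RPe code together from two fuse registers: five low bits from the FMAX LO fuse and three high bits from the HI fuse, shifted up by five. A sketch of that stitching; the concrete mask and shift values are assumptions standing in for the FB_FMAX_VMIN_FREQ_* definitions, which this diff does not show:

#include <stdint.h>
#include <stdio.h>

/* Assumed layout: RPe bits 4:0 in bits 31:27 of the LO fuse,
 * RPe bits 7:5 in bits 2:0 of the HI fuse. */
#define FMAX_LO_MASK  0xf8000000u
#define FMAX_LO_SHIFT 27
#define FMAX_HI_MASK  0x00000007u

static uint32_t rpe_from_fuses(uint32_t lo, uint32_t hi)
{
    uint32_t rpe = (lo & FMAX_LO_MASK) >> FMAX_LO_SHIFT;

    rpe |= (hi & FMAX_HI_MASK) << 5;
    return rpe;     /* 8-bit frequency code */
}

int main(void)
{
    /* hypothetical fuse values encoding RPe = 0xb6 (0b10110110) */
    printf("rpe = 0x%02x\n", rpe_from_fuses(0x16u << 27, 0x5u));
    return 0;
}
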
3617 3617
3618int valleyview_rps_min_freq(struct drm_i915_private *dev_priv) 3618int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
3619{ 3619{
3620 return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff; 3620 return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
3621} 3621}
3622 3622
3623/* Check that the pctx buffer wasn't moved under us. */ 3623/* Check that the pctx buffer wasn't moved under us. */
3624static void valleyview_check_pctx(struct drm_i915_private *dev_priv) 3624static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
3625{ 3625{
3626 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095; 3626 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
3627 3627
 3628 if (WARN_ON(!dev_priv->vlv_pctx))
 3629 return;
3628 WARN_ON(pctx_addr != dev_priv->mm.stolen_base + 3630 WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
3629 dev_priv->vlv_pctx->stolen->start); 3631 dev_priv->vlv_pctx->stolen->start);
3630} 3632}
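
The two lines added above are the point of this revision: when the allocation-failure path in valleyview_setup_pctx() below returns early, dev_priv->vlv_pctx stays NULL, and the second WARN_ON would then dereference a NULL pointer through vlv_pctx->stolen->start merely while evaluating its warning condition. A minimal sketch of the pattern, with hypothetical stand-in types for the driver structures:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-ins for the driver structures. */
struct stolen_node { unsigned long start; };
struct pctx_obj    { struct stolen_node *stolen; };

/* Toy WARN_ON: report the condition and yield it, like the kernel macro. */
#define WARN_ON(cond) \
    ((cond) ? (printf("WARN_ON(%s)\n", #cond), true) : false)

static void check_pctx(struct pctx_obj *pctx, unsigned long stolen_base,
    unsigned long pctx_addr)
{
    /* Bail out before the second WARN_ON can dereference NULL. */
    if (WARN_ON(!pctx))
        return;
    WARN_ON(pctx_addr != stolen_base + pctx->stolen->start);
}

int main(void)
{
    check_pctx(NULL, 0x1000, 0x2000);   /* warns, but no crash */
    return 0;
}
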
3631 3633
3632static void valleyview_setup_pctx(struct drm_device *dev) 3634static void valleyview_setup_pctx(struct drm_device *dev)
3633{ 3635{
3634 struct drm_i915_private *dev_priv = dev->dev_private; 3636 struct drm_i915_private *dev_priv = dev->dev_private;
3635 struct drm_i915_gem_object *pctx; 3637 struct drm_i915_gem_object *pctx;
3636 unsigned long pctx_paddr; 3638 unsigned long pctx_paddr;
3637 u32 pcbr; 3639 u32 pcbr;
3638 int pctx_size = 24*1024; 3640 int pctx_size = 24*1024;
3639 3641
3640 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 3642 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
3641 3643
3642 pcbr = I915_READ(VLV_PCBR); 3644 pcbr = I915_READ(VLV_PCBR);
3643 if (pcbr) { 3645 if (pcbr) {
3644 /* BIOS set it up already, grab the pre-alloc'd space */ 3646 /* BIOS set it up already, grab the pre-alloc'd space */
3645 int pcbr_offset; 3647 int pcbr_offset;
3646 3648
3647 pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base; 3649 pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
3648 pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev, 3650 pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
3649 pcbr_offset, 3651 pcbr_offset,
3650 I915_GTT_OFFSET_NONE, 3652 I915_GTT_OFFSET_NONE,
3651 pctx_size); 3653 pctx_size);
3652 goto out; 3654 goto out;
3653 } 3655 }
3654 3656
3655 /* 3657 /*
3656 * From the Gunit register HAS: 3658 * From the Gunit register HAS:
3657 * The Gfx driver is expected to program this register and ensure 3659 * The Gfx driver is expected to program this register and ensure
3658 * proper allocation within Gfx stolen memory. For example, this 3660 * proper allocation within Gfx stolen memory. For example, this
3659 * register should be programmed such that the PCBR range does not 3661 * register should be programmed such that the PCBR range does not
3660 * overlap with other ranges, such as the frame buffer, protected 3662 * overlap with other ranges, such as the frame buffer, protected
3661 * memory, or any other relevant ranges. 3663 * memory, or any other relevant ranges.
3662 */ 3664 */
3663 pctx = i915_gem_object_create_stolen(dev, pctx_size); 3665 pctx = i915_gem_object_create_stolen(dev, pctx_size);
3664 if (!pctx) { 3666 if (!pctx) {
3665 DRM_DEBUG("not enough stolen space for PCTX, disabling\n"); 3667 DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
3666 return; 3668 return;
3667 } 3669 }
3668 3670
3669 pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start; 3671 pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
3670 I915_WRITE(VLV_PCBR, pctx_paddr); 3672 I915_WRITE(VLV_PCBR, pctx_paddr);
3671 3673
3672out: 3674out:
3673 dev_priv->vlv_pctx = pctx; 3675 dev_priv->vlv_pctx = pctx;
3674} 3676}
3675 3677
3676static void valleyview_cleanup_pctx(struct drm_device *dev) 3678static void valleyview_cleanup_pctx(struct drm_device *dev)
3677{ 3679{
3678 struct drm_i915_private *dev_priv = dev->dev_private; 3680 struct drm_i915_private *dev_priv = dev->dev_private;
3679 3681
3680 if (WARN_ON(!dev_priv->vlv_pctx)) 3682 if (WARN_ON(!dev_priv->vlv_pctx))
3681 return; 3683 return;
3682 3684
3683 drm_gem_object_unreference(&dev_priv->vlv_pctx->base); 3685 drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
3684 dev_priv->vlv_pctx = NULL; 3686 dev_priv->vlv_pctx = NULL;
3685} 3687}
3686 3688
3687static void valleyview_enable_rps(struct drm_device *dev) 3689static void valleyview_enable_rps(struct drm_device *dev)
3688{ 3690{
3689 struct drm_i915_private *dev_priv = dev->dev_private; 3691 struct drm_i915_private *dev_priv = dev->dev_private;
3690 struct intel_ring_buffer *ring; 3692 struct intel_ring_buffer *ring;
3691 u32 gtfifodbg, val, rc6_mode = 0; 3693 u32 gtfifodbg, val, rc6_mode = 0;
3692 int i; 3694 int i;
3693 3695
3694 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 3696 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3695 3697
3696 valleyview_check_pctx(dev_priv); 3698 valleyview_check_pctx(dev_priv);
3697 3699
3698 if ((gtfifodbg = I915_READ(GTFIFODBG))) { 3700 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
3699 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n", 3701 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
3700 gtfifodbg); 3702 gtfifodbg);
3701 I915_WRITE(GTFIFODBG, gtfifodbg); 3703 I915_WRITE(GTFIFODBG, gtfifodbg);
3702 } 3704 }
3703 3705
3704 /* If VLV, Forcewake all wells, else re-direct to regular path */ 3706 /* If VLV, Forcewake all wells, else re-direct to regular path */
3705 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); 3707 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
3706 3708
3707 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400); 3709 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
3708 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000); 3710 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
3709 I915_WRITE(GEN6_RP_UP_EI, 66000); 3711 I915_WRITE(GEN6_RP_UP_EI, 66000);
3710 I915_WRITE(GEN6_RP_DOWN_EI, 350000); 3712 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
3711 3713
3712 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); 3714 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
3713 3715
3714 I915_WRITE(GEN6_RP_CONTROL, 3716 I915_WRITE(GEN6_RP_CONTROL,
3715 GEN6_RP_MEDIA_TURBO | 3717 GEN6_RP_MEDIA_TURBO |
3716 GEN6_RP_MEDIA_HW_NORMAL_MODE | 3718 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3717 GEN6_RP_MEDIA_IS_GFX | 3719 GEN6_RP_MEDIA_IS_GFX |
3718 GEN6_RP_ENABLE | 3720 GEN6_RP_ENABLE |
3719 GEN6_RP_UP_BUSY_AVG | 3721 GEN6_RP_UP_BUSY_AVG |
3720 GEN6_RP_DOWN_IDLE_CONT); 3722 GEN6_RP_DOWN_IDLE_CONT);
3721 3723
3722 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000); 3724 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
3723 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); 3725 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
3724 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); 3726 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
3725 3727
3726 for_each_ring(ring, dev_priv, i) 3728 for_each_ring(ring, dev_priv, i)
3727 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10); 3729 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
3728 3730
3729 I915_WRITE(GEN6_RC6_THRESHOLD, 0x557); 3731 I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
3730 3732
3731 /* allows RC6 residency counter to work */ 3733 /* allows RC6 residency counter to work */
3732 I915_WRITE(VLV_COUNTER_CONTROL, 3734 I915_WRITE(VLV_COUNTER_CONTROL,
3733 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH | 3735 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
3734 VLV_MEDIA_RC6_COUNT_EN | 3736 VLV_MEDIA_RC6_COUNT_EN |
3735 VLV_RENDER_RC6_COUNT_EN)); 3737 VLV_RENDER_RC6_COUNT_EN));
3736 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) 3738 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
3737 rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL; 3739 rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
3738 3740
3739 intel_print_rc6_info(dev, rc6_mode); 3741 intel_print_rc6_info(dev, rc6_mode);
3740 3742
3741 I915_WRITE(GEN6_RC_CONTROL, rc6_mode); 3743 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
3742 3744
3743 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); 3745 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
3744 3746
3745 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no"); 3747 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
3746 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); 3748 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
3747 3749
3748 dev_priv->rps.cur_freq = (val >> 8) & 0xff; 3750 dev_priv->rps.cur_freq = (val >> 8) & 0xff;
3749 DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n", 3751 DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
3750 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq), 3752 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
3751 dev_priv->rps.cur_freq); 3753 dev_priv->rps.cur_freq);
3752 3754
3753 dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv); 3755 dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
3754 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq; 3756 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
3755 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n", 3757 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
3756 vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq), 3758 vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
3757 dev_priv->rps.max_freq); 3759 dev_priv->rps.max_freq);
3758 3760
3759 dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv); 3761 dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
3760 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n", 3762 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
3761 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq), 3763 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
3762 dev_priv->rps.efficient_freq); 3764 dev_priv->rps.efficient_freq);
3763 3765
3764 dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv); 3766 dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
3765 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n", 3767 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
3766 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq), 3768 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
3767 dev_priv->rps.min_freq); 3769 dev_priv->rps.min_freq);
3768 3770
3769 /* Preserve min/max settings in case of re-init */ 3771 /* Preserve min/max settings in case of re-init */
3770 if (dev_priv->rps.max_freq_softlimit == 0) 3772 if (dev_priv->rps.max_freq_softlimit == 0)
3771 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq; 3773 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
3772 3774
3773 if (dev_priv->rps.min_freq_softlimit == 0) 3775 if (dev_priv->rps.min_freq_softlimit == 0)
3774 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq; 3776 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
3775 3777
3776 DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n", 3778 DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
3777 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq), 3779 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
3778 dev_priv->rps.efficient_freq); 3780 dev_priv->rps.efficient_freq);
3779 3781
3780 valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq); 3782 valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
3781 3783
3782 gen6_enable_rps_interrupts(dev); 3784 gen6_enable_rps_interrupts(dev);
3783 3785
3784 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL); 3786 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
3785} 3787}
3786 3788
3787void ironlake_teardown_rc6(struct drm_device *dev) 3789void ironlake_teardown_rc6(struct drm_device *dev)
3788{ 3790{
3789 struct drm_i915_private *dev_priv = dev->dev_private; 3791 struct drm_i915_private *dev_priv = dev->dev_private;
3790 3792
3791 if (dev_priv->ips.renderctx) { 3793 if (dev_priv->ips.renderctx) {
3792 i915_gem_object_ggtt_unpin(dev_priv->ips.renderctx); 3794 i915_gem_object_ggtt_unpin(dev_priv->ips.renderctx);
3793 drm_gem_object_unreference(&dev_priv->ips.renderctx->base); 3795 drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
3794 dev_priv->ips.renderctx = NULL; 3796 dev_priv->ips.renderctx = NULL;
3795 } 3797 }
3796 3798
3797 if (dev_priv->ips.pwrctx) { 3799 if (dev_priv->ips.pwrctx) {
3798 i915_gem_object_ggtt_unpin(dev_priv->ips.pwrctx); 3800 i915_gem_object_ggtt_unpin(dev_priv->ips.pwrctx);
3799 drm_gem_object_unreference(&dev_priv->ips.pwrctx->base); 3801 drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
3800 dev_priv->ips.pwrctx = NULL; 3802 dev_priv->ips.pwrctx = NULL;
3801 } 3803 }
3802} 3804}
3803 3805
3804static void ironlake_disable_rc6(struct drm_device *dev) 3806static void ironlake_disable_rc6(struct drm_device *dev)
3805{ 3807{
3806 struct drm_i915_private *dev_priv = dev->dev_private; 3808 struct drm_i915_private *dev_priv = dev->dev_private;
3807 3809
3808 if (I915_READ(PWRCTXA)) { 3810 if (I915_READ(PWRCTXA)) {
3809 /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */ 3811 /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
3810 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT); 3812 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
3811 wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON), 3813 wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
3812 50); 3814 50);
3813 3815
3814 I915_WRITE(PWRCTXA, 0); 3816 I915_WRITE(PWRCTXA, 0);
3815 POSTING_READ(PWRCTXA); 3817 POSTING_READ(PWRCTXA);
3816 3818
3817 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); 3819 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
3818 POSTING_READ(RSTDBYCTL); 3820 POSTING_READ(RSTDBYCTL);
3819 } 3821 }
3820} 3822}
3821 3823
3822static int ironlake_setup_rc6(struct drm_device *dev) 3824static int ironlake_setup_rc6(struct drm_device *dev)
3823{ 3825{
3824 struct drm_i915_private *dev_priv = dev->dev_private; 3826 struct drm_i915_private *dev_priv = dev->dev_private;
3825 3827
3826 if (dev_priv->ips.renderctx == NULL) 3828 if (dev_priv->ips.renderctx == NULL)
3827 dev_priv->ips.renderctx = intel_alloc_context_page(dev); 3829 dev_priv->ips.renderctx = intel_alloc_context_page(dev);
3828 if (!dev_priv->ips.renderctx) 3830 if (!dev_priv->ips.renderctx)
3829 return -ENOMEM; 3831 return -ENOMEM;
3830 3832
3831 if (dev_priv->ips.pwrctx == NULL) 3833 if (dev_priv->ips.pwrctx == NULL)
3832 dev_priv->ips.pwrctx = intel_alloc_context_page(dev); 3834 dev_priv->ips.pwrctx = intel_alloc_context_page(dev);
3833 if (!dev_priv->ips.pwrctx) { 3835 if (!dev_priv->ips.pwrctx) {
3834 ironlake_teardown_rc6(dev); 3836 ironlake_teardown_rc6(dev);
3835 return -ENOMEM; 3837 return -ENOMEM;
3836 } 3838 }
3837 3839
3838 return 0; 3840 return 0;
3839} 3841}
3840 3842
3841static void ironlake_enable_rc6(struct drm_device *dev) 3843static void ironlake_enable_rc6(struct drm_device *dev)
3842{ 3844{
3843 struct drm_i915_private *dev_priv = dev->dev_private; 3845 struct drm_i915_private *dev_priv = dev->dev_private;
3844 struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; 3846 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
3845 bool was_interruptible; 3847 bool was_interruptible;
3846 int ret; 3848 int ret;
3847 3849
3848 /* rc6 disabled by default due to repeated reports of hanging during 3850 /* rc6 disabled by default due to repeated reports of hanging during
3849 * boot and resume. 3851 * boot and resume.
3850 */ 3852 */
3851 if (!intel_enable_rc6(dev)) 3853 if (!intel_enable_rc6(dev))
3852 return; 3854 return;
3853 3855
3854 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 3856 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
3855 3857
3856 ret = ironlake_setup_rc6(dev); 3858 ret = ironlake_setup_rc6(dev);
3857 if (ret) 3859 if (ret)
3858 return; 3860 return;
3859 3861
3860 was_interruptible = dev_priv->mm.interruptible; 3862 was_interruptible = dev_priv->mm.interruptible;
3861 dev_priv->mm.interruptible = false; 3863 dev_priv->mm.interruptible = false;
3862 3864
3863 /* 3865 /*
3864 * GPU can automatically power down the render unit if given a page 3866 * GPU can automatically power down the render unit if given a page
3865 * to save state. 3867 * to save state.
3866 */ 3868 */
3867 ret = intel_ring_begin(ring, 6); 3869 ret = intel_ring_begin(ring, 6);
3868 if (ret) { 3870 if (ret) {
3869 ironlake_teardown_rc6(dev); 3871 ironlake_teardown_rc6(dev);
3870 dev_priv->mm.interruptible = was_interruptible; 3872 dev_priv->mm.interruptible = was_interruptible;
3871 return; 3873 return;
3872 } 3874 }
3873 3875
3874 intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN); 3876 intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
3875 intel_ring_emit(ring, MI_SET_CONTEXT); 3877 intel_ring_emit(ring, MI_SET_CONTEXT);
3876 intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) | 3878 intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) |
3877 MI_MM_SPACE_GTT | 3879 MI_MM_SPACE_GTT |
3878 MI_SAVE_EXT_STATE_EN | 3880 MI_SAVE_EXT_STATE_EN |
3879 MI_RESTORE_EXT_STATE_EN | 3881 MI_RESTORE_EXT_STATE_EN |
3880 MI_RESTORE_INHIBIT); 3882 MI_RESTORE_INHIBIT);
3881 intel_ring_emit(ring, MI_SUSPEND_FLUSH); 3883 intel_ring_emit(ring, MI_SUSPEND_FLUSH);
3882 intel_ring_emit(ring, MI_NOOP); 3884 intel_ring_emit(ring, MI_NOOP);
3883 intel_ring_emit(ring, MI_FLUSH); 3885 intel_ring_emit(ring, MI_FLUSH);
3884 intel_ring_advance(ring); 3886 intel_ring_advance(ring);
3885 3887
3886 /* 3888 /*
3887 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW 3889 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
3888 * does an implicit flush; combined with MI_FLUSH above, it should be 3890 * does an implicit flush; combined with MI_FLUSH above, it should be
3889 * safe to assume that renderctx is valid 3891 * safe to assume that renderctx is valid
3890 */ 3892 */
3891 ret = intel_ring_idle(ring); 3893 ret = intel_ring_idle(ring);
3892 dev_priv->mm.interruptible = was_interruptible; 3894 dev_priv->mm.interruptible = was_interruptible;
3893 if (ret) { 3895 if (ret) {
3894 DRM_ERROR("failed to enable ironlake power savings\n"); 3896 DRM_ERROR("failed to enable ironlake power savings\n");
3895 ironlake_teardown_rc6(dev); 3897 ironlake_teardown_rc6(dev);
3896 return; 3898 return;
3897 } 3899 }
3898 3900
3899 I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN); 3901 I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
3900 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); 3902 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
3901 3903
3902 intel_print_rc6_info(dev, INTEL_RC6_ENABLE); 3904 intel_print_rc6_info(dev, INTEL_RC6_ENABLE);
3903} 3905}
3904 3906
3905static unsigned long intel_pxfreq(u32 vidfreq) 3907static unsigned long intel_pxfreq(u32 vidfreq)
3906{ 3908{
3907 unsigned long freq; 3909 unsigned long freq;
3908 int div = (vidfreq & 0x3f0000) >> 16; 3910 int div = (vidfreq & 0x3f0000) >> 16;
3909 int post = (vidfreq & 0x3000) >> 12; 3911 int post = (vidfreq & 0x3000) >> 12;
3910 int pre = (vidfreq & 0x7); 3912 int pre = (vidfreq & 0x7);
3911 3913
3912 if (!pre) 3914 if (!pre)
3913 return 0; 3915 return 0;
3914 3916
3915 freq = ((div * 133333) / ((1<<post) * pre)); 3917 freq = ((div * 133333) / ((1<<post) * pre));
3916 3918
3917 return freq; 3919 return freq;
3918} 3920}
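
intel_pxfreq() is a plain divider decode: freq = div * 133333 / ((1 << post) * pre), with the three fields pulled out of the vidfreq register. A worked example with a hypothetical register value:

#include <stdio.h>

/* Same decode as intel_pxfreq() above. */
static unsigned long pxfreq(unsigned int vidfreq)
{
    int div  = (vidfreq & 0x3f0000) >> 16;
    int post = (vidfreq & 0x3000) >> 12;
    int pre  = (vidfreq & 0x7);

    if (!pre)
        return 0;
    return (div * 133333UL) / ((1 << post) * pre);
}

int main(void)
{
    /* hypothetical: div = 12, post = 1, pre = 2 -> 12*133333/4 = 399999 */
    unsigned int v = (12 << 16) | (1 << 12) | 2;

    printf("%lu\n", pxfreq(v));
    return 0;
}
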
3919 3921
3920static const struct cparams { 3922static const struct cparams {
3921 u16 i; 3923 u16 i;
3922 u16 t; 3924 u16 t;
3923 u16 m; 3925 u16 m;
3924 u16 c; 3926 u16 c;
3925} cparams[] = { 3927} cparams[] = {
3926 { 1, 1333, 301, 28664 }, 3928 { 1, 1333, 301, 28664 },
3927 { 1, 1066, 294, 24460 }, 3929 { 1, 1066, 294, 24460 },
3928 { 1, 800, 294, 25192 }, 3930 { 1, 800, 294, 25192 },
3929 { 0, 1333, 276, 27605 }, 3931 { 0, 1333, 276, 27605 },
3930 { 0, 1066, 276, 27605 }, 3932 { 0, 1066, 276, 27605 },
3931 { 0, 800, 231, 23784 }, 3933 { 0, 800, 231, 23784 },
3932}; 3934};
3933 3935
3934static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv) 3936static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
3935{ 3937{
3936 u64 total_count, diff, ret; 3938 u64 total_count, diff, ret;
3937 u32 count1, count2, count3, m = 0, c = 0; 3939 u32 count1, count2, count3, m = 0, c = 0;
3938 unsigned long now = jiffies_to_msecs(jiffies), diff1; 3940 unsigned long now = jiffies_to_msecs(jiffies), diff1;
3939 int i; 3941 int i;
3940 3942
3941 assert_spin_locked(&mchdev_lock); 3943 assert_spin_locked(&mchdev_lock);
3942 3944
3943 diff1 = now - dev_priv->ips.last_time1; 3945 diff1 = now - dev_priv->ips.last_time1;
3944 3946
3945 /* Prevent division-by-zero if we are asking too fast. 3947 /* Prevent division-by-zero if we are asking too fast.
3946 * Also, we don't get interesting results if we are polling 3948 * Also, we don't get interesting results if we are polling
3947 * faster than once in 10ms, so just return the saved value 3949 * faster than once in 10ms, so just return the saved value
3948 * in such cases. 3950 * in such cases.
3949 */ 3951 */
3950 if (diff1 <= 10) 3952 if (diff1 <= 10)
3951 return dev_priv->ips.chipset_power; 3953 return dev_priv->ips.chipset_power;
3952 3954
3953 count1 = I915_READ(DMIEC); 3955 count1 = I915_READ(DMIEC);
3954 count2 = I915_READ(DDREC); 3956 count2 = I915_READ(DDREC);
3955 count3 = I915_READ(CSIEC); 3957 count3 = I915_READ(CSIEC);
3956 3958
3957 total_count = count1 + count2 + count3; 3959 total_count = count1 + count2 + count3;
3958 3960
3959 /* FIXME: handle per-counter overflow */ 3961 /* FIXME: handle per-counter overflow */
3960 if (total_count < dev_priv->ips.last_count1) { 3962 if (total_count < dev_priv->ips.last_count1) {
3961 diff = ~0UL - dev_priv->ips.last_count1; 3963 diff = ~0UL - dev_priv->ips.last_count1;
3962 diff += total_count; 3964 diff += total_count;
3963 } else { 3965 } else {
3964 diff = total_count - dev_priv->ips.last_count1; 3966 diff = total_count - dev_priv->ips.last_count1;
3965 } 3967 }
3966 3968
3967 for (i = 0; i < ARRAY_SIZE(cparams); i++) { 3969 for (i = 0; i < ARRAY_SIZE(cparams); i++) {
3968 if (cparams[i].i == dev_priv->ips.c_m && 3970 if (cparams[i].i == dev_priv->ips.c_m &&
3969 cparams[i].t == dev_priv->ips.r_t) { 3971 cparams[i].t == dev_priv->ips.r_t) {
3970 m = cparams[i].m; 3972 m = cparams[i].m;
3971 c = cparams[i].c; 3973 c = cparams[i].c;
3972 break; 3974 break;
3973 } 3975 }
3974 } 3976 }
3975 3977
3976 diff = div_u64(diff, diff1); 3978 diff = div_u64(diff, diff1);
3977 ret = ((m * diff) + c); 3979 ret = ((m * diff) + c);
3978 ret = div_u64(ret, 10); 3980 ret = div_u64(ret, 10);
3979 3981
3980 dev_priv->ips.last_count1 = total_count; 3982 dev_priv->ips.last_count1 = total_count;
3981 dev_priv->ips.last_time1 = now; 3983 dev_priv->ips.last_time1 = now;
3982 3984
3983 dev_priv->ips.chipset_power = ret; 3985 dev_priv->ips.chipset_power = ret;
3984 3986
3985 return ret; 3987 return ret;
3986} 3988}
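
__i915_chipset_val() above boils down to: sum three event counters, take a wrap-safe delta against the last sample, normalize by the elapsed milliseconds, then apply the (m, c) pair selected from cparams[]. A standalone sketch of that arithmetic with hypothetical sample values:

#include <stdint.h>
#include <stdio.h>

/*
 * Toy version of the energy-counter math above: overflow-safe delta,
 * divide by elapsed ms, scale by the (m, c) row for the current
 * memory configuration, then the final /10.
 */
static uint64_t chipset_power(uint64_t total, uint64_t last,
    unsigned long diff_ms, uint32_t m, uint32_t c)
{
    uint64_t diff;

    if (total < last)       /* counter wrapped */
        diff = UINT64_MAX - last + total;
    else
        diff = total - last;

    return (m * (diff / diff_ms) + c) / 10;
}

int main(void)
{
    /* hypothetical sample, using the cparams row { m = 294, c = 24460 } */
    printf("%llu\n", (unsigned long long)
        chipset_power(1500000, 1200000, 100, 294, 24460));
    return 0;
}
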
3987 3989
3988unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) 3990unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
3989{ 3991{
3990 struct drm_device *dev = dev_priv->dev; 3992 struct drm_device *dev = dev_priv->dev;
3991 unsigned long val; 3993 unsigned long val;
3992 3994
3993 if (INTEL_INFO(dev)->gen != 5) 3995 if (INTEL_INFO(dev)->gen != 5)
3994 return 0; 3996 return 0;
3995 3997
3996 spin_lock_irq(&mchdev_lock); 3998 spin_lock_irq(&mchdev_lock);
3997 3999
3998 val = __i915_chipset_val(dev_priv); 4000 val = __i915_chipset_val(dev_priv);
3999 4001
4000 spin_unlock_irq(&mchdev_lock); 4002 spin_unlock_irq(&mchdev_lock);
4001 4003
4002 return val; 4004 return val;
4003} 4005}
4004 4006
4005unsigned long i915_mch_val(struct drm_i915_private *dev_priv) 4007unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
4006{ 4008{
4007 unsigned long m, x, b; 4009 unsigned long m, x, b;
4008 u32 tsfs; 4010 u32 tsfs;
4009 4011
4010 tsfs = I915_READ(TSFS); 4012 tsfs = I915_READ(TSFS);
4011 4013
4012 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT); 4014 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
4013 x = I915_READ8(TR1); 4015 x = I915_READ8(TR1);
4014 4016
4015 b = tsfs & TSFS_INTR_MASK; 4017 b = tsfs & TSFS_INTR_MASK;
4016 4018
4017 return ((m * x) / 127) - b; 4019 return ((m * x) / 127) - b;
4018} 4020}
4019 4021
4020static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid) 4022static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
4021{ 4023{
4022 struct drm_device *dev = dev_priv->dev; 4024 struct drm_device *dev = dev_priv->dev;
4023 static const struct v_table { 4025 static const struct v_table {
4024 u16 vd; /* in .1 mil */ 4026 u16 vd; /* in .1 mil */
4025 u16 vm; /* in .1 mil */ 4027 u16 vm; /* in .1 mil */
4026 } v_table[] = { 4028 } v_table[] = {
4027 { 0, 0, }, 4029 { 0, 0, },
4028 { 375, 0, }, 4030 { 375, 0, },
4029 { 500, 0, }, 4031 { 500, 0, },
4030 { 625, 0, }, 4032 { 625, 0, },
4031 { 750, 0, }, 4033 { 750, 0, },
4032 { 875, 0, }, 4034 { 875, 0, },
4033 { 1000, 0, }, 4035 { 1000, 0, },
4034 { 1125, 0, }, 4036 { 1125, 0, },
4035 { 4125, 3000, }, 4037 { 4125, 3000, },
4036 { 4125, 3000, }, 4038 { 4125, 3000, },
4037 { 4125, 3000, }, 4039 { 4125, 3000, },
4038 { 4125, 3000, }, 4040 { 4125, 3000, },
4039 { 4125, 3000, }, 4041 { 4125, 3000, },
4040 { 4125, 3000, }, 4042 { 4125, 3000, },
4041 { 4125, 3000, }, 4043 { 4125, 3000, },
4042 { 4125, 3000, }, 4044 { 4125, 3000, },
4043 { 4125, 3000, }, 4045 { 4125, 3000, },
4044 { 4125, 3000, }, 4046 { 4125, 3000, },
4045 { 4125, 3000, }, 4047 { 4125, 3000, },
4046 { 4125, 3000, }, 4048 { 4125, 3000, },
4047 { 4125, 3000, }, 4049 { 4125, 3000, },
4048 { 4125, 3000, }, 4050 { 4125, 3000, },
4049 { 4125, 3000, }, 4051 { 4125, 3000, },
4050 { 4125, 3000, }, 4052 { 4125, 3000, },
4051 { 4125, 3000, }, 4053 { 4125, 3000, },
4052 { 4125, 3000, }, 4054 { 4125, 3000, },
4053 { 4125, 3000, }, 4055 { 4125, 3000, },
4054 { 4125, 3000, }, 4056 { 4125, 3000, },
4055 { 4125, 3000, }, 4057 { 4125, 3000, },
4056 { 4125, 3000, }, 4058 { 4125, 3000, },
4057 { 4125, 3000, }, 4059 { 4125, 3000, },
4058 { 4125, 3000, }, 4060 { 4125, 3000, },
4059 { 4250, 3125, }, 4061 { 4250, 3125, },
4060 { 4375, 3250, }, 4062 { 4375, 3250, },
4061 { 4500, 3375, }, 4063 { 4500, 3375, },
4062 { 4625, 3500, }, 4064 { 4625, 3500, },
4063 { 4750, 3625, }, 4065 { 4750, 3625, },
4064 { 4875, 3750, }, 4066 { 4875, 3750, },
4065 { 5000, 3875, }, 4067 { 5000, 3875, },
4066 { 5125, 4000, }, 4068 { 5125, 4000, },
4067 { 5250, 4125, }, 4069 { 5250, 4125, },
4068 { 5375, 4250, }, 4070 { 5375, 4250, },
4069 { 5500, 4375, }, 4071 { 5500, 4375, },
4070 { 5625, 4500, }, 4072 { 5625, 4500, },
4071 { 5750, 4625, }, 4073 { 5750, 4625, },
4072 { 5875, 4750, }, 4074 { 5875, 4750, },
4073 { 6000, 4875, }, 4075 { 6000, 4875, },
4074 { 6125, 5000, }, 4076 { 6125, 5000, },
4075 { 6250, 5125, }, 4077 { 6250, 5125, },
4076 { 6375, 5250, }, 4078 { 6375, 5250, },
4077 { 6500, 5375, }, 4079 { 6500, 5375, },
4078 { 6625, 5500, }, 4080 { 6625, 5500, },
4079 { 6750, 5625, }, 4081 { 6750, 5625, },
4080 { 6875, 5750, }, 4082 { 6875, 5750, },
4081 { 7000, 5875, }, 4083 { 7000, 5875, },
4082 { 7125, 6000, }, 4084 { 7125, 6000, },
4083 { 7250, 6125, }, 4085 { 7250, 6125, },
4084 { 7375, 6250, }, 4086 { 7375, 6250, },
4085 { 7500, 6375, }, 4087 { 7500, 6375, },
4086 { 7625, 6500, }, 4088 { 7625, 6500, },
4087 { 7750, 6625, }, 4089 { 7750, 6625, },
4088 { 7875, 6750, }, 4090 { 7875, 6750, },
4089 { 8000, 6875, }, 4091 { 8000, 6875, },
4090 { 8125, 7000, }, 4092 { 8125, 7000, },
4091 { 8250, 7125, }, 4093 { 8250, 7125, },
4092 { 8375, 7250, }, 4094 { 8375, 7250, },
4093 { 8500, 7375, }, 4095 { 8500, 7375, },
4094 { 8625, 7500, }, 4096 { 8625, 7500, },
4095 { 8750, 7625, }, 4097 { 8750, 7625, },
4096 { 8875, 7750, }, 4098 { 8875, 7750, },
4097 { 9000, 7875, }, 4099 { 9000, 7875, },
4098 { 9125, 8000, }, 4100 { 9125, 8000, },
4099 { 9250, 8125, }, 4101 { 9250, 8125, },
4100 { 9375, 8250, }, 4102 { 9375, 8250, },
4101 { 9500, 8375, }, 4103 { 9500, 8375, },
4102 { 9625, 8500, }, 4104 { 9625, 8500, },
4103 { 9750, 8625, }, 4105 { 9750, 8625, },
4104 { 9875, 8750, }, 4106 { 9875, 8750, },
4105 { 10000, 8875, }, 4107 { 10000, 8875, },
4106 { 10125, 9000, }, 4108 { 10125, 9000, },
4107 { 10250, 9125, }, 4109 { 10250, 9125, },
4108 { 10375, 9250, }, 4110 { 10375, 9250, },
4109 { 10500, 9375, }, 4111 { 10500, 9375, },
4110 { 10625, 9500, }, 4112 { 10625, 9500, },
4111 { 10750, 9625, }, 4113 { 10750, 9625, },
4112 { 10875, 9750, }, 4114 { 10875, 9750, },
4113 { 11000, 9875, }, 4115 { 11000, 9875, },
4114 { 11125, 10000, }, 4116 { 11125, 10000, },
4115 { 11250, 10125, }, 4117 { 11250, 10125, },
4116 { 11375, 10250, }, 4118 { 11375, 10250, },
4117 { 11500, 10375, }, 4119 { 11500, 10375, },
4118 { 11625, 10500, }, 4120 { 11625, 10500, },
4119 { 11750, 10625, }, 4121 { 11750, 10625, },
4120 { 11875, 10750, }, 4122 { 11875, 10750, },
4121 { 12000, 10875, }, 4123 { 12000, 10875, },
4122 { 12125, 11000, }, 4124 { 12125, 11000, },
4123 { 12250, 11125, }, 4125 { 12250, 11125, },
4124 { 12375, 11250, }, 4126 { 12375, 11250, },
4125 { 12500, 11375, }, 4127 { 12500, 11375, },
4126 { 12625, 11500, }, 4128 { 12625, 11500, },
4127 { 12750, 11625, }, 4129 { 12750, 11625, },
4128 { 12875, 11750, }, 4130 { 12875, 11750, },
4129 { 13000, 11875, }, 4131 { 13000, 11875, },
4130 { 13125, 12000, }, 4132 { 13125, 12000, },
4131 { 13250, 12125, }, 4133 { 13250, 12125, },
4132 { 13375, 12250, }, 4134 { 13375, 12250, },
4133 { 13500, 12375, }, 4135 { 13500, 12375, },
4134 { 13625, 12500, }, 4136 { 13625, 12500, },
4135 { 13750, 12625, }, 4137 { 13750, 12625, },
4136 { 13875, 12750, }, 4138 { 13875, 12750, },
4137 { 14000, 12875, }, 4139 { 14000, 12875, },
4138 { 14125, 13000, }, 4140 { 14125, 13000, },
4139 { 14250, 13125, }, 4141 { 14250, 13125, },
4140 { 14375, 13250, }, 4142 { 14375, 13250, },
4141 { 14500, 13375, }, 4143 { 14500, 13375, },
4142 { 14625, 13500, }, 4144 { 14625, 13500, },
4143 { 14750, 13625, }, 4145 { 14750, 13625, },
4144 { 14875, 13750, }, 4146 { 14875, 13750, },
4145 { 15000, 13875, }, 4147 { 15000, 13875, },
4146 { 15125, 14000, }, 4148 { 15125, 14000, },
4147 { 15250, 14125, }, 4149 { 15250, 14125, },
4148 { 15375, 14250, }, 4150 { 15375, 14250, },
4149 { 15500, 14375, }, 4151 { 15500, 14375, },
4150 { 15625, 14500, }, 4152 { 15625, 14500, },
4151 { 15750, 14625, }, 4153 { 15750, 14625, },
4152 { 15875, 14750, }, 4154 { 15875, 14750, },
4153 { 16000, 14875, }, 4155 { 16000, 14875, },
4154 { 16125, 15000, }, 4156 { 16125, 15000, },
4155 }; 4157 };
4156 if (INTEL_INFO(dev)->is_mobile) 4158 if (INTEL_INFO(dev)->is_mobile)
4157 return v_table[pxvid].vm; 4159 return v_table[pxvid].vm;
4158 else 4160 else
4159 return v_table[pxvid].vd; 4161 return v_table[pxvid].vd;
4160} 4162}
4161 4163
4162static void __i915_update_gfx_val(struct drm_i915_private *dev_priv) 4164static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
4163{ 4165{
4164 struct timespec now, diff1; 4166 struct timespec now, diff1;
4165 u64 diff; 4167 u64 diff;
4166 unsigned long diffms; 4168 unsigned long diffms;
4167 u32 count; 4169 u32 count;
4168 4170
4169 assert_spin_locked(&mchdev_lock); 4171 assert_spin_locked(&mchdev_lock);
4170 4172
4171 getrawmonotonic(&now); 4173 getrawmonotonic(&now);
4172 diff1 = timespec_sub(now, dev_priv->ips.last_time2); 4174 diff1 = timespec_sub(now, dev_priv->ips.last_time2);
4173 4175
4174 /* Don't divide by 0 */ 4176 /* Don't divide by 0 */
4175 diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000; 4177 diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
4176 if (!diffms) 4178 if (!diffms)
4177 return; 4179 return;
4178 4180
4179 count = I915_READ(GFXEC); 4181 count = I915_READ(GFXEC);
4180 4182
4181 if (count < dev_priv->ips.last_count2) { 4183 if (count < dev_priv->ips.last_count2) {
4182 diff = ~0UL - dev_priv->ips.last_count2; 4184 diff = ~0UL - dev_priv->ips.last_count2;
4183 diff += count; 4185 diff += count;
4184 } else { 4186 } else {
4185 diff = count - dev_priv->ips.last_count2; 4187 diff = count - dev_priv->ips.last_count2;
4186 } 4188 }
4187 4189
4188 dev_priv->ips.last_count2 = count; 4190 dev_priv->ips.last_count2 = count;
4189 dev_priv->ips.last_time2 = now; 4191 dev_priv->ips.last_time2 = now;
4190 4192
4191 /* More magic constants... */ 4193 /* More magic constants... */
4192 diff = diff * 1181; 4194 diff = diff * 1181;
4193 diff = div_u64(diff, diffms * 10); 4195 diff = div_u64(diff, diffms * 10);
4194 dev_priv->ips.gfx_power = diff; 4196 dev_priv->ips.gfx_power = diff;
4195} 4197}
4196 4198
4197void i915_update_gfx_val(struct drm_i915_private *dev_priv) 4199void i915_update_gfx_val(struct drm_i915_private *dev_priv)
4198{ 4200{
4199 struct drm_device *dev = dev_priv->dev; 4201 struct drm_device *dev = dev_priv->dev;
4200 4202
4201 if (INTEL_INFO(dev)->gen != 5) 4203 if (INTEL_INFO(dev)->gen != 5)
4202 return; 4204 return;
4203 4205
4204 spin_lock_irq(&mchdev_lock); 4206 spin_lock_irq(&mchdev_lock);
4205 4207
4206 __i915_update_gfx_val(dev_priv); 4208 __i915_update_gfx_val(dev_priv);
4207 4209
4208 spin_unlock_irq(&mchdev_lock); 4210 spin_unlock_irq(&mchdev_lock);
4209} 4211}
4210 4212
4211static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv) 4213static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
4212{ 4214{
4213 unsigned long t, corr, state1, corr2, state2; 4215 unsigned long t, corr, state1, corr2, state2;
4214 u32 pxvid, ext_v; 4216 u32 pxvid, ext_v;
4215 4217
4216 assert_spin_locked(&mchdev_lock); 4218 assert_spin_locked(&mchdev_lock);
4217 4219
4218 pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_freq * 4)); 4220 pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_freq * 4));
4219 pxvid = (pxvid >> 24) & 0x7f; 4221 pxvid = (pxvid >> 24) & 0x7f;
4220 ext_v = pvid_to_extvid(dev_priv, pxvid); 4222 ext_v = pvid_to_extvid(dev_priv, pxvid);
4221 4223
4222 state1 = ext_v; 4224 state1 = ext_v;
4223 4225
4224 t = i915_mch_val(dev_priv); 4226 t = i915_mch_val(dev_priv);
4225 4227
4226 /* Revel in the empirically derived constants */ 4228 /* Revel in the empirically derived constants */
4227 4229
4228 /* Correction factor in 1/100000 units */ 4230 /* Correction factor in 1/100000 units */
4229 if (t > 80) 4231 if (t > 80)
4230 corr = ((t * 2349) + 135940); 4232 corr = ((t * 2349) + 135940);
4231 else if (t >= 50) 4233 else if (t >= 50)
4232 corr = ((t * 964) + 29317); 4234 corr = ((t * 964) + 29317);
4233 else /* < 50 */ 4235 else /* < 50 */
4234 corr = ((t * 301) + 1004); 4236 corr = ((t * 301) + 1004);
4235 4237
4236 corr = corr * ((150142 * state1) / 10000 - 78642); 4238 corr = corr * ((150142 * state1) / 10000 - 78642);
4237 corr /= 100000; 4239 corr /= 100000;
4238 corr2 = (corr * dev_priv->ips.corr); 4240 corr2 = (corr * dev_priv->ips.corr);
4239 4241
4240 state2 = (corr2 * state1) / 10000; 4242 state2 = (corr2 * state1) / 10000;
4241 state2 /= 100; /* convert to mW */ 4243 state2 /= 100; /* convert to mW */
4242 4244
4243 __i915_update_gfx_val(dev_priv); 4245 __i915_update_gfx_val(dev_priv);
4244 4246
4245 return dev_priv->ips.gfx_power + state2; 4247 return dev_priv->ips.gfx_power + state2;
4246} 4248}
4247 4249
4248unsigned long i915_gfx_val(struct drm_i915_private *dev_priv) 4250unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
4249{ 4251{
4250 struct drm_device *dev = dev_priv->dev; 4252 struct drm_device *dev = dev_priv->dev;
4251 unsigned long val; 4253 unsigned long val;
4252 4254
4253 if (INTEL_INFO(dev)->gen != 5) 4255 if (INTEL_INFO(dev)->gen != 5)
4254 return 0; 4256 return 0;
4255 4257
4256 spin_lock_irq(&mchdev_lock); 4258 spin_lock_irq(&mchdev_lock);
4257 4259
4258 val = __i915_gfx_val(dev_priv); 4260 val = __i915_gfx_val(dev_priv);
4259 4261
4260 spin_unlock_irq(&mchdev_lock); 4262 spin_unlock_irq(&mchdev_lock);
4261 4263
4262 return val; 4264 return val;
4263} 4265}
4264 4266
4265/** 4267/**
4266 * i915_read_mch_val - return value for IPS use 4268 * i915_read_mch_val - return value for IPS use
4267 * 4269 *
4268 * Calculate and return a value for the IPS driver to use when deciding whether 4270 * Calculate and return a value for the IPS driver to use when deciding whether
4269 * we have thermal and power headroom to increase CPU or GPU power budget. 4271 * we have thermal and power headroom to increase CPU or GPU power budget.
4270 */ 4272 */
4271unsigned long i915_read_mch_val(void) 4273unsigned long i915_read_mch_val(void)
4272{ 4274{
4273 struct drm_i915_private *dev_priv; 4275 struct drm_i915_private *dev_priv;
4274 unsigned long chipset_val, graphics_val, ret = 0; 4276 unsigned long chipset_val, graphics_val, ret = 0;
4275 4277
4276 spin_lock_irq(&mchdev_lock); 4278 spin_lock_irq(&mchdev_lock);
4277 if (!i915_mch_dev) 4279 if (!i915_mch_dev)
4278 goto out_unlock; 4280 goto out_unlock;
4279 dev_priv = i915_mch_dev; 4281 dev_priv = i915_mch_dev;
4280 4282
4281 chipset_val = __i915_chipset_val(dev_priv); 4283 chipset_val = __i915_chipset_val(dev_priv);
4282 graphics_val = __i915_gfx_val(dev_priv); 4284 graphics_val = __i915_gfx_val(dev_priv);
4283 4285
4284 ret = chipset_val + graphics_val; 4286 ret = chipset_val + graphics_val;
4285 4287
4286out_unlock: 4288out_unlock:
4287 spin_unlock_irq(&mchdev_lock); 4289 spin_unlock_irq(&mchdev_lock);
4288 4290
4289 return ret; 4291 return ret;
4290} 4292}
4291EXPORT_SYMBOL_GPL(i915_read_mch_val); 4293EXPORT_SYMBOL_GPL(i915_read_mch_val);
4292 4294
4293/** 4295/**
4294 * i915_gpu_raise - raise GPU frequency limit 4296 * i915_gpu_raise - raise GPU frequency limit
4295 * 4297 *
4296 * Raise the limit; IPS indicates we have thermal headroom. 4298 * Raise the limit; IPS indicates we have thermal headroom.
4297 */ 4299 */
4298bool i915_gpu_raise(void) 4300bool i915_gpu_raise(void)
4299{ 4301{
4300 struct drm_i915_private *dev_priv; 4302 struct drm_i915_private *dev_priv;
4301 bool ret = true; 4303 bool ret = true;
4302 4304
4303 spin_lock_irq(&mchdev_lock); 4305 spin_lock_irq(&mchdev_lock);
4304 if (!i915_mch_dev) { 4306 if (!i915_mch_dev) {
4305 ret = false; 4307 ret = false;
4306 goto out_unlock; 4308 goto out_unlock;
4307 } 4309 }
4308 dev_priv = i915_mch_dev; 4310 dev_priv = i915_mch_dev;
4309 4311
4310 if (dev_priv->ips.max_delay > dev_priv->ips.fmax) 4312 if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
4311 dev_priv->ips.max_delay--; 4313 dev_priv->ips.max_delay--;
4312 4314
4313out_unlock: 4315out_unlock:
4314 spin_unlock_irq(&mchdev_lock); 4316 spin_unlock_irq(&mchdev_lock);
4315 4317
4316 return ret; 4318 return ret;
4317} 4319}
4318EXPORT_SYMBOL_GPL(i915_gpu_raise); 4320EXPORT_SYMBOL_GPL(i915_gpu_raise);
4319 4321
4320/** 4322/**
4321 * i915_gpu_lower - lower GPU frequency limit 4323 * i915_gpu_lower - lower GPU frequency limit
4322 * 4324 *
4323 * IPS indicates we're close to a thermal limit, so throttle back the GPU 4325 * IPS indicates we're close to a thermal limit, so throttle back the GPU
4324 * frequency maximum. 4326 * frequency maximum.
4325 */ 4327 */
4326bool i915_gpu_lower(void) 4328bool i915_gpu_lower(void)
4327{ 4329{
4328 struct drm_i915_private *dev_priv; 4330 struct drm_i915_private *dev_priv;
4329 bool ret = true; 4331 bool ret = true;
4330 4332
4331 spin_lock_irq(&mchdev_lock); 4333 spin_lock_irq(&mchdev_lock);
4332 if (!i915_mch_dev) { 4334 if (!i915_mch_dev) {
4333 ret = false; 4335 ret = false;
4334 goto out_unlock; 4336 goto out_unlock;
4335 } 4337 }
4336 dev_priv = i915_mch_dev; 4338 dev_priv = i915_mch_dev;
4337 4339
4338 if (dev_priv->ips.max_delay < dev_priv->ips.min_delay) 4340 if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
4339 dev_priv->ips.max_delay++; 4341 dev_priv->ips.max_delay++;
4340 4342
4341out_unlock: 4343out_unlock:
4342 spin_unlock_irq(&mchdev_lock); 4344 spin_unlock_irq(&mchdev_lock);
4343 4345
4344 return ret; 4346 return ret;
4345} 4347}
4346EXPORT_SYMBOL_GPL(i915_gpu_lower); 4348EXPORT_SYMBOL_GPL(i915_gpu_lower);
4347 4349
4348/** 4350/**
4349 * i915_gpu_busy - indicate GPU busyness to IPS 4351 * i915_gpu_busy - indicate GPU busyness to IPS
4350 * 4352 *
4351 * Tell the IPS driver whether or not the GPU is busy. 4353 * Tell the IPS driver whether or not the GPU is busy.
4352 */ 4354 */
4353bool i915_gpu_busy(void) 4355bool i915_gpu_busy(void)
4354{ 4356{
4355 struct drm_i915_private *dev_priv; 4357 struct drm_i915_private *dev_priv;
4356 struct intel_ring_buffer *ring; 4358 struct intel_ring_buffer *ring;
4357 bool ret = false; 4359 bool ret = false;
4358 int i; 4360 int i;
4359 4361
4360 spin_lock_irq(&mchdev_lock); 4362 spin_lock_irq(&mchdev_lock);
4361 if (!i915_mch_dev) 4363 if (!i915_mch_dev)
4362 goto out_unlock; 4364 goto out_unlock;
4363 dev_priv = i915_mch_dev; 4365 dev_priv = i915_mch_dev;
4364 4366
4365 for_each_ring(ring, dev_priv, i) 4367 for_each_ring(ring, dev_priv, i)
4366 ret |= !list_empty(&ring->request_list); 4368 ret |= !list_empty(&ring->request_list);
4367 4369
4368out_unlock: 4370out_unlock:
4369 spin_unlock_irq(&mchdev_lock); 4371 spin_unlock_irq(&mchdev_lock);
4370 4372
4371 return ret; 4373 return ret;
4372} 4374}
4373EXPORT_SYMBOL_GPL(i915_gpu_busy); 4375EXPORT_SYMBOL_GPL(i915_gpu_busy);
4374 4376
4375/** 4377/**
4376 * i915_gpu_turbo_disable - disable graphics turbo 4378 * i915_gpu_turbo_disable - disable graphics turbo
4377 * 4379 *
4378 * Disable graphics turbo by resetting the max frequency and setting the 4380 * Disable graphics turbo by resetting the max frequency and setting the
4379 * current frequency to the default. 4381 * current frequency to the default.
4380 */ 4382 */
4381bool i915_gpu_turbo_disable(void) 4383bool i915_gpu_turbo_disable(void)
4382{ 4384{
4383 struct drm_i915_private *dev_priv; 4385 struct drm_i915_private *dev_priv;
4384 bool ret = true; 4386 bool ret = true;
4385 4387
4386 spin_lock_irq(&mchdev_lock); 4388 spin_lock_irq(&mchdev_lock);
4387 if (!i915_mch_dev) { 4389 if (!i915_mch_dev) {
4388 ret = false; 4390 ret = false;
4389 goto out_unlock; 4391 goto out_unlock;
4390 } 4392 }
4391 dev_priv = i915_mch_dev; 4393 dev_priv = i915_mch_dev;
4392 4394
4393 dev_priv->ips.max_delay = dev_priv->ips.fstart; 4395 dev_priv->ips.max_delay = dev_priv->ips.fstart;
4394 4396
4395 if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart)) 4397 if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
4396 ret = false; 4398 ret = false;
4397 4399
4398out_unlock: 4400out_unlock:
4399 spin_unlock_irq(&mchdev_lock); 4401 spin_unlock_irq(&mchdev_lock);
4400 4402
4401 return ret; 4403 return ret;
4402} 4404}
4403EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable); 4405EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
4404 4406
4405/** 4407/**
4406 * Tells the intel_ips driver that the i915 driver is now loaded, if 4408 * Tells the intel_ips driver that the i915 driver is now loaded, if
4407 * IPS got loaded first. 4409 * IPS got loaded first.
4408 * 4410 *
4409 * This awkward dance is so that neither module has to depend on the 4411 * This awkward dance is so that neither module has to depend on the
4410 * other in order for IPS to do the appropriate communication of 4412 * other in order for IPS to do the appropriate communication of
4411 * GPU turbo limits to i915. 4413 * GPU turbo limits to i915.
4412 */ 4414 */
4413static void 4415static void
4414ips_ping_for_i915_load(void) 4416ips_ping_for_i915_load(void)
4415{ 4417{
4416#ifndef __NetBSD__ /* XXX IPS GPU turbo limits what? */ 4418#ifndef __NetBSD__ /* XXX IPS GPU turbo limits what? */
4417 void (*link)(void); 4419 void (*link)(void);
4418 4420
4419 link = symbol_get(ips_link_to_i915_driver); 4421 link = symbol_get(ips_link_to_i915_driver);
4420 if (link) { 4422 if (link) {
4421 link(); 4423 link();
4422 symbol_put(ips_link_to_i915_driver); 4424 symbol_put(ips_link_to_i915_driver);
4423 } 4425 }
4424#endif 4426#endif
4425} 4427}
4426 4428
4427void intel_gpu_ips_init(struct drm_i915_private *dev_priv) 4429void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
4428{ 4430{
4429 /* We only register the i915 ips part with intel-ips once everything is 4431 /* We only register the i915 ips part with intel-ips once everything is
4430 * set up, to avoid intel-ips sneaking in and reading bogus values. */ 4432 * set up, to avoid intel-ips sneaking in and reading bogus values. */
4431 spin_lock_irq(&mchdev_lock); 4433 spin_lock_irq(&mchdev_lock);
4432 i915_mch_dev = dev_priv; 4434 i915_mch_dev = dev_priv;
4433 spin_unlock_irq(&mchdev_lock); 4435 spin_unlock_irq(&mchdev_lock);
4434 4436
4435 ips_ping_for_i915_load(); 4437 ips_ping_for_i915_load();
4436} 4438}
4437 4439
4438void intel_gpu_ips_teardown(void) 4440void intel_gpu_ips_teardown(void)
4439{ 4441{
4440 spin_lock_irq(&mchdev_lock); 4442 spin_lock_irq(&mchdev_lock);
4441 i915_mch_dev = NULL; 4443 i915_mch_dev = NULL;
4442 spin_unlock_irq(&mchdev_lock); 4444 spin_unlock_irq(&mchdev_lock);
4443} 4445}
4444 4446
static void intel_init_emon(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 lcfuse;
	u8 pxw[16];
	int i;

	/* Disable to program */
	I915_WRITE(ECR, 0);
	POSTING_READ(ECR);

	/* Program energy weights for various events */
	I915_WRITE(SDEW, 0x15040d00);
	I915_WRITE(CSIEW0, 0x007f0000);
	I915_WRITE(CSIEW1, 0x1e220004);
	I915_WRITE(CSIEW2, 0x04000004);

	for (i = 0; i < 5; i++)
		I915_WRITE(PEW + (i * 4), 0);
	for (i = 0; i < 3; i++)
		I915_WRITE(DEW + (i * 4), 0);

	/* Program P-state weights to account for frequency power adjustment */
	for (i = 0; i < 16; i++) {
		u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
		unsigned long freq = intel_pxfreq(pxvidfreq);
		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
			PXVFREQ_PX_SHIFT;
		unsigned long val;

		val = vid * vid;
		val *= (freq / 1000);
		val *= 255;
		val /= (127*127*900);
		if (val > 0xff)
			DRM_ERROR("bad pxval: %ld\n", val);
		pxw[i] = val;
	}
	/* Render standby states get 0 weight */
	pxw[14] = 0;
	pxw[15] = 0;

	for (i = 0; i < 4; i++) {
		u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
			(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
		I915_WRITE(PXW + (i * 4), val);
	}

	/* Adjust magic regs to magic values (more experimental results) */
	I915_WRITE(OGW0, 0);
	I915_WRITE(OGW1, 0);
	I915_WRITE(EG0, 0x00007f00);
	I915_WRITE(EG1, 0x0000000e);
	I915_WRITE(EG2, 0x000e0000);
	I915_WRITE(EG3, 0x68000300);
	I915_WRITE(EG4, 0x42000000);
	I915_WRITE(EG5, 0x00140031);
	I915_WRITE(EG6, 0);
	I915_WRITE(EG7, 0);

	for (i = 0; i < 8; i++)
		I915_WRITE(PXWL + (i * 4), 0);

	/* Enable PMON + select events */
	I915_WRITE(ECR, 0x80000019);

	lcfuse = I915_READ(LCFUSE02);

	dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
}

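The P-state weight loop in intel_init_emon() squeezes a vid-squared-times-frequency scaling into one byte per state. A worked instance of the same arithmetic, with illustrative inputs rather than values decoded from real PXVFREQ fuses (example_px_weight() is a made-up name):

/* Editorial sketch of the weight formula above; the numbers in the
 * comments are illustrative, not read from hardware. */
static u8 example_px_weight(unsigned long vid, unsigned long freq_khz)
{
	unsigned long val = vid * vid;	/* vid = 100       -> 10000 */

	val *= freq_khz / 1000;		/* freq = 400000   -> * 400 = 4000000 */
	val *= 255;			/* -> 1020000000 */
	val /= 127 * 127 * 900;		/* / 14516100 -> 70 */

	return val;			/* 70 <= 0xff, so no "bad pxval" */
}
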
void intel_init_gt_powersave(struct drm_device *dev)
{
	if (IS_VALLEYVIEW(dev))
		valleyview_setup_pctx(dev);
}

void intel_cleanup_gt_powersave(struct drm_device *dev)
{
	if (IS_VALLEYVIEW(dev))
		valleyview_cleanup_pctx(dev);
}

void intel_disable_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Interrupts should be disabled already to avoid re-arming. */
	WARN_ON(dev->irq_enabled);

	if (IS_IRONLAKE_M(dev)) {
		ironlake_disable_drps(dev);
		ironlake_disable_rc6(dev);
	} else if (INTEL_INFO(dev)->gen >= 6) {
		cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
		cancel_work_sync(&dev_priv->rps.work);
		mutex_lock(&dev_priv->rps.hw_lock);
		if (IS_VALLEYVIEW(dev))
			valleyview_disable_rps(dev);
		else
			gen6_disable_rps(dev);
		dev_priv->rps.enabled = false;
		mutex_unlock(&dev_priv->rps.hw_lock);
	}
}

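Note the ordering in intel_disable_gt_powersave(): the RPS work items are cancelled synchronously before rps.hw_lock is taken, because the worker itself acquires that mutex. A generic sketch of the pattern, with placeholder names rather than i915 symbols:

#include <linux/mutex.h>
#include <linux/workqueue.h>

/* Editorial sketch: powersave_state and powersave_disable() are
 * placeholders illustrating cancel-before-lock teardown. */
struct powersave_state {
	struct delayed_work resume_work;	/* worker takes hw_lock */
	struct mutex hw_lock;
	bool enabled;
};

static void powersave_disable(struct powersave_state *ps)
{
	/* Waits for a running worker; must not hold hw_lock here,
	 * or we would deadlock against the worker acquiring it. */
	cancel_delayed_work_sync(&ps->resume_work);

	mutex_lock(&ps->hw_lock);	/* now uncontended by the worker */
	ps->enabled = false;
	mutex_unlock(&ps->hw_lock);
}
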
static void intel_gen6_powersave_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     rps.delayed_resume_work.work);
	struct drm_device *dev = dev_priv->dev;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (IS_VALLEYVIEW(dev)) {
		valleyview_enable_rps(dev);
	} else if (IS_BROADWELL(dev)) {
		gen8_enable_rps(dev);
		gen6_update_ring_freq(dev);
	} else {
		gen6_enable_rps(dev);
		gen6_update_ring_freq(dev);
	}
	dev_priv->rps.enabled = true;
	mutex_unlock(&dev_priv->rps.hw_lock);
}
4572 4574
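intel_gen6_powersave_work() recovers its drm_i915_private from the bare work_struct pointer via container_of(). The idiom in isolation, simplified to plain offset arithmetic (the real kernel macro adds type checking on top; all names here are stand-ins):

#include <stddef.h>

/* Editorial sketch of container_of(): given a pointer to an embedded
 * member, step back to the enclosing structure. */
#define sketch_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct outer {
	int id;
	int member;		/* stand-in for the embedded work item */
};

static struct outer *outer_from_member(int *m)
{
	return sketch_container_of(m, struct outer, member);
}
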
void intel_enable_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_IRONLAKE_M(dev)) {
		ironlake_enable_drps(dev);
		ironlake_enable_rc6(dev);
		intel_init_emon(dev);
	} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
		/*
		 * PCU communication is slow and this doesn't need to be
		 * done at any specific time, so do this out of our fast path
		 * to make resume and init faster.
		 */
		schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
				      round_jiffies_up_relative(HZ));
	}
}

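The GEN6/GEN7 branch above defers the slow PCU handshake to a workqueue instead of blocking init or resume. Continuing the hypothetical powersave_state sketch from the disable path, the enable side reduces to:

/* Editorial sketch: defer the slow part roughly one second out.
 * round_jiffies_up_relative() aligns the expiry with other
 * once-a-second timers so an idle CPU is not woken up alone. */
static void powersave_enable_deferred(struct powersave_state *ps)
{
	schedule_delayed_work(&ps->resume_work,
			      round_jiffies_up_relative(HZ));
}
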
static void ibx_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}

static void g4x_disable_trickle_feed(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_primary_plane(dev_priv, pipe);
	}
}

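g4x_disable_trickle_feed() performs a read-modify-write on each pipe's DSPCNTR: read, OR in the one disable bit, write back, then flush. The same RMW step as a hypothetical helper (rmw_set() is a made-up name, not an i915 API):

/* Editorial sketch: OR control bits into one instance of a banked
 * display register without disturbing the other bits. */
static inline void rmw_set(struct drm_i915_private *dev_priv,
			   u32 reg, u32 bits)
{
	I915_WRITE(reg, I915_READ(reg) | bits);
}

/* usage: rmw_set(dev_priv, DSPCNTR(pipe), DISPPLANE_TRICKLE_FEED_DISABLE); */
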
static void ilk_init_lp_watermarks(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
	I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
	I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);

	/*
	 * Don't touch WM1S_LP_EN here.