Mon Aug 27 06:16:01 2018 UTC ()
Provide enum irqreturn.

Merge in old ifdefs.
Ifdef out code that I'm unsure about right away and turn it into a panic
call.
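
(Illustrative sketch, not part of the diff below: roughly what "provide
enum irqreturn" in the Linux-compatibility header drm_irq_netbsd.h and the
"ifdef out and panic" pattern look like.  The enum values are assumed to
mirror Linux's <linux/irqreturn.h>, and example_unported_path() is a
made-up name; the real hunks are in the files listed below.)

#include <sys/cdefs.h>
#include <sys/systm.h>          /* panic() */

/* Linux-style interrupt handler return values. */
enum irqreturn {
        IRQ_NONE        = 0,    /* interrupt was not ours */
        IRQ_HANDLED     = 1,    /* interrupt was handled */
        IRQ_WAKE_THREAD = 2,    /* wake the threaded handler */
};
typedef enum irqreturn irqreturn_t;

/* Code we are unsure about is compiled out and replaced by a panic. */
void
example_unported_path(void)
{
#ifdef __NetBSD__
        /* XXX unsure about this path on NetBSD; disable it for now. */
        panic("%s: not yet ported to NetBSD", __func__);
#else
        /* original Linux code path would run here */
#endif
}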

Author: coypu <coypu@sdf.org>
Committer: Taylor R Campbell <riastradh@NetBSD.org>


diff -r1.16 -r1.17 src/sys/external/bsd/drm2/dist/drm/i915/intel_display.c
diff -r1.11 -r1.12 src/sys/external/bsd/drm2/dist/drm/i915/intel_dp.c
diff -r1.4 -r1.5 src/sys/external/bsd/drm2/include/drm/drm_irq_netbsd.h

cvs diff -r1.16 -r1.17 src/sys/external/bsd/drm2/dist/drm/i915/Attic/intel_display.c

--- src/sys/external/bsd/drm2/dist/drm/i915/Attic/intel_display.c 2018/08/27 04:58:24 1.16
+++ src/sys/external/bsd/drm2/dist/drm/i915/Attic/intel_display.c 2018/08/27 06:16:01 1.17
@@ -1,1029 +1,1029 @@
-/* $NetBSD: intel_display.c,v 1.16 2018/08/27 04:58:24 riastradh Exp $ */
+/* $NetBSD: intel_display.c,v 1.17 2018/08/27 06:16:01 riastradh Exp $ */
2 2
3/* 3/*
4 * Copyright © 2006-2007 Intel Corporation 4 * Copyright © 2006-2007 Intel Corporation
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"), 7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation 8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the 10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions: 11 * Software is furnished to do so, subject to the following conditions:
12 * 12 *
13 * The above copyright notice and this permission notice (including the next 13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the 14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software. 15 * Software.
16 * 16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 * DEALINGS IN THE SOFTWARE. 23 * DEALINGS IN THE SOFTWARE.
24 * 24 *
25 * Authors: 25 * Authors:
26 * Eric Anholt <eric@anholt.net> 26 * Eric Anholt <eric@anholt.net>
27 */ 27 */
28 28
29#include <sys/cdefs.h> 29#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: intel_display.c,v 1.16 2018/08/27 04:58:24 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: intel_display.c,v 1.17 2018/08/27 06:16:01 riastradh Exp $");
31 31
32#include <linux/dmi.h> 32#include <linux/dmi.h>
33#include <linux/module.h> 33#include <linux/module.h>
34#include <linux/input.h> 34#include <linux/input.h>
35#include <linux/i2c.h> 35#include <linux/i2c.h>
36#include <linux/kernel.h> 36#include <linux/kernel.h>
37#include <linux/slab.h> 37#include <linux/slab.h>
38#include <linux/vgaarb.h> 38#include <linux/vgaarb.h>
39#include <drm/drm_edid.h> 39#include <drm/drm_edid.h>
40#include <drm/drmP.h> 40#include <drm/drmP.h>
41#include "intel_drv.h" 41#include "intel_drv.h"
42#include <drm/i915_drm.h> 42#include <drm/i915_drm.h>
43#include "i915_drv.h" 43#include "i915_drv.h"
44#include "i915_trace.h" 44#include "i915_trace.h"
45#include <drm/drm_atomic.h> 45#include <drm/drm_atomic.h>
46#include <drm/drm_atomic_helper.h> 46#include <drm/drm_atomic_helper.h>
47#include <drm/drm_dp_helper.h> 47#include <drm/drm_dp_helper.h>
48#include <drm/drm_crtc_helper.h> 48#include <drm/drm_crtc_helper.h>
49#include <drm/drm_plane_helper.h> 49#include <drm/drm_plane_helper.h>
50#include <drm/drm_rect.h> 50#include <drm/drm_rect.h>
51#include <linux/dma_remapping.h> 51#include <linux/dma_remapping.h>
52#include <linux/err.h> 52#include <linux/err.h>
53#include <asm/bug.h> 53#include <asm/bug.h>
54#include <linux/math64.h> 54#include <linux/math64.h>
55#include <linux/bitops.h> 55#include <linux/bitops.h>
56#include <linux/log2.h> 56#include <linux/log2.h>
57 57
58/* Primary plane formats for gen <= 3 */ 58/* Primary plane formats for gen <= 3 */
59static const uint32_t i8xx_primary_formats[] = { 59static const uint32_t i8xx_primary_formats[] = {
60 DRM_FORMAT_C8, 60 DRM_FORMAT_C8,
61 DRM_FORMAT_RGB565, 61 DRM_FORMAT_RGB565,
62 DRM_FORMAT_XRGB1555, 62 DRM_FORMAT_XRGB1555,
63 DRM_FORMAT_XRGB8888, 63 DRM_FORMAT_XRGB8888,
64}; 64};
65 65
66/* Primary plane formats for gen >= 4 */ 66/* Primary plane formats for gen >= 4 */
67static const uint32_t i965_primary_formats[] = { 67static const uint32_t i965_primary_formats[] = {
68 DRM_FORMAT_C8, 68 DRM_FORMAT_C8,
69 DRM_FORMAT_RGB565, 69 DRM_FORMAT_RGB565,
70 DRM_FORMAT_XRGB8888, 70 DRM_FORMAT_XRGB8888,
71 DRM_FORMAT_XBGR8888, 71 DRM_FORMAT_XBGR8888,
72 DRM_FORMAT_XRGB2101010, 72 DRM_FORMAT_XRGB2101010,
73 DRM_FORMAT_XBGR2101010, 73 DRM_FORMAT_XBGR2101010,
74}; 74};
75 75
76static const uint32_t skl_primary_formats[] = { 76static const uint32_t skl_primary_formats[] = {
77 DRM_FORMAT_C8, 77 DRM_FORMAT_C8,
78 DRM_FORMAT_RGB565, 78 DRM_FORMAT_RGB565,
79 DRM_FORMAT_XRGB8888, 79 DRM_FORMAT_XRGB8888,
80 DRM_FORMAT_XBGR8888, 80 DRM_FORMAT_XBGR8888,
81 DRM_FORMAT_ARGB8888, 81 DRM_FORMAT_ARGB8888,
82 DRM_FORMAT_ABGR8888, 82 DRM_FORMAT_ABGR8888,
83 DRM_FORMAT_XRGB2101010, 83 DRM_FORMAT_XRGB2101010,
84 DRM_FORMAT_XBGR2101010, 84 DRM_FORMAT_XBGR2101010,
85 DRM_FORMAT_YUYV, 85 DRM_FORMAT_YUYV,
86 DRM_FORMAT_YVYU, 86 DRM_FORMAT_YVYU,
87 DRM_FORMAT_UYVY, 87 DRM_FORMAT_UYVY,
88 DRM_FORMAT_VYUY, 88 DRM_FORMAT_VYUY,
89}; 89};
90 90
91/* Cursor formats */ 91/* Cursor formats */
92static const uint32_t intel_cursor_formats[] = { 92static const uint32_t intel_cursor_formats[] = {
93 DRM_FORMAT_ARGB8888, 93 DRM_FORMAT_ARGB8888,
94}; 94};
95 95
96static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); 96static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
97 97
98static void i9xx_crtc_clock_get(struct intel_crtc *crtc, 98static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
99 struct intel_crtc_state *pipe_config); 99 struct intel_crtc_state *pipe_config);
100static void ironlake_pch_clock_get(struct intel_crtc *crtc, 100static void ironlake_pch_clock_get(struct intel_crtc *crtc,
101 struct intel_crtc_state *pipe_config); 101 struct intel_crtc_state *pipe_config);
102 102
103static int intel_framebuffer_init(struct drm_device *dev, 103static int intel_framebuffer_init(struct drm_device *dev,
104 struct intel_framebuffer *ifb, 104 struct intel_framebuffer *ifb,
105 struct drm_mode_fb_cmd2 *mode_cmd, 105 struct drm_mode_fb_cmd2 *mode_cmd,
106 struct drm_i915_gem_object *obj); 106 struct drm_i915_gem_object *obj);
107static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc); 107static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
108static void intel_set_pipe_timings(struct intel_crtc *intel_crtc); 108static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
109static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc, 109static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
110 struct intel_link_m_n *m_n, 110 struct intel_link_m_n *m_n,
111 struct intel_link_m_n *m2_n2); 111 struct intel_link_m_n *m2_n2);
112static void ironlake_set_pipeconf(struct drm_crtc *crtc); 112static void ironlake_set_pipeconf(struct drm_crtc *crtc);
113static void haswell_set_pipeconf(struct drm_crtc *crtc); 113static void haswell_set_pipeconf(struct drm_crtc *crtc);
114static void intel_set_pipe_csc(struct drm_crtc *crtc); 114static void intel_set_pipe_csc(struct drm_crtc *crtc);
115static void vlv_prepare_pll(struct intel_crtc *crtc, 115static void vlv_prepare_pll(struct intel_crtc *crtc,
116 const struct intel_crtc_state *pipe_config); 116 const struct intel_crtc_state *pipe_config);
117static void chv_prepare_pll(struct intel_crtc *crtc, 117static void chv_prepare_pll(struct intel_crtc *crtc,
118 const struct intel_crtc_state *pipe_config); 118 const struct intel_crtc_state *pipe_config);
119static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *); 119static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
120static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *); 120static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
121static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc, 121static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
122 struct intel_crtc_state *crtc_state); 122 struct intel_crtc_state *crtc_state);
123static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state, 123static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
124 int num_connectors); 124 int num_connectors);
125static void skylake_pfit_enable(struct intel_crtc *crtc); 125static void skylake_pfit_enable(struct intel_crtc *crtc);
126static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force); 126static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
127static void ironlake_pfit_enable(struct intel_crtc *crtc); 127static void ironlake_pfit_enable(struct intel_crtc *crtc);
128static void intel_modeset_setup_hw_state(struct drm_device *dev); 128static void intel_modeset_setup_hw_state(struct drm_device *dev);
129static void intel_pre_disable_primary(struct drm_crtc *crtc); 129static void intel_pre_disable_primary(struct drm_crtc *crtc);
130 130
131typedef struct { 131typedef struct {
132 int min, max; 132 int min, max;
133} intel_range_t; 133} intel_range_t;
134 134
135typedef struct { 135typedef struct {
136 int dot_limit; 136 int dot_limit;
137 int p2_slow, p2_fast; 137 int p2_slow, p2_fast;
138} intel_p2_t; 138} intel_p2_t;
139 139
140typedef struct intel_limit intel_limit_t; 140typedef struct intel_limit intel_limit_t;
141struct intel_limit { 141struct intel_limit {
142 intel_range_t dot, vco, n, m, m1, m2, p, p1; 142 intel_range_t dot, vco, n, m, m1, m2, p, p1;
143 intel_p2_t p2; 143 intel_p2_t p2;
144}; 144};
145 145
146/* returns HPLL frequency in kHz */ 146/* returns HPLL frequency in kHz */
147static int valleyview_get_vco(struct drm_i915_private *dev_priv) 147static int valleyview_get_vco(struct drm_i915_private *dev_priv)
148{ 148{
149 int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 }; 149 int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
150 150
151 /* Obtain SKU information */ 151 /* Obtain SKU information */
152 mutex_lock(&dev_priv->sb_lock); 152 mutex_lock(&dev_priv->sb_lock);
153 hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) & 153 hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
154 CCK_FUSE_HPLL_FREQ_MASK; 154 CCK_FUSE_HPLL_FREQ_MASK;
155 mutex_unlock(&dev_priv->sb_lock); 155 mutex_unlock(&dev_priv->sb_lock);
156 156
157 return vco_freq[hpll_freq] * 1000; 157 return vco_freq[hpll_freq] * 1000;
158} 158}
159 159
160static int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv, 160static int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
161 const char *name, u32 reg) 161 const char *name, u32 reg)
162{ 162{
163 u32 val; 163 u32 val;
164 int divider; 164 int divider;
165 165
166 if (dev_priv->hpll_freq == 0) 166 if (dev_priv->hpll_freq == 0)
167 dev_priv->hpll_freq = valleyview_get_vco(dev_priv); 167 dev_priv->hpll_freq = valleyview_get_vco(dev_priv);
168 168
169 mutex_lock(&dev_priv->sb_lock); 169 mutex_lock(&dev_priv->sb_lock);
170 val = vlv_cck_read(dev_priv, reg); 170 val = vlv_cck_read(dev_priv, reg);
171 mutex_unlock(&dev_priv->sb_lock); 171 mutex_unlock(&dev_priv->sb_lock);
172 172
173 divider = val & CCK_FREQUENCY_VALUES; 173 divider = val & CCK_FREQUENCY_VALUES;
174 174
175 WARN((val & CCK_FREQUENCY_STATUS) != 175 WARN((val & CCK_FREQUENCY_STATUS) !=
176 (divider << CCK_FREQUENCY_STATUS_SHIFT), 176 (divider << CCK_FREQUENCY_STATUS_SHIFT),
177 "%s change in progress\n", name); 177 "%s change in progress\n", name);
178 178
179 return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, divider + 1); 179 return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, divider + 1);
180} 180}
181 181
182int 182int
183intel_pch_rawclk(struct drm_device *dev) 183intel_pch_rawclk(struct drm_device *dev)
184{ 184{
185 struct drm_i915_private *dev_priv = dev->dev_private; 185 struct drm_i915_private *dev_priv = dev->dev_private;
186 186
187 WARN_ON(!HAS_PCH_SPLIT(dev)); 187 WARN_ON(!HAS_PCH_SPLIT(dev));
188 188
189 return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK; 189 return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
190} 190}
191 191
192/* hrawclock is 1/4 the FSB frequency */ 192/* hrawclock is 1/4 the FSB frequency */
193int intel_hrawclk(struct drm_device *dev) 193int intel_hrawclk(struct drm_device *dev)
194{ 194{
195 struct drm_i915_private *dev_priv = dev->dev_private; 195 struct drm_i915_private *dev_priv = dev->dev_private;
196 uint32_t clkcfg; 196 uint32_t clkcfg;
197 197
198 /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */ 198 /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
199 if (IS_VALLEYVIEW(dev)) 199 if (IS_VALLEYVIEW(dev))
200 return 200; 200 return 200;
201 201
202 clkcfg = I915_READ(CLKCFG); 202 clkcfg = I915_READ(CLKCFG);
203 switch (clkcfg & CLKCFG_FSB_MASK) { 203 switch (clkcfg & CLKCFG_FSB_MASK) {
204 case CLKCFG_FSB_400: 204 case CLKCFG_FSB_400:
205 return 100; 205 return 100;
206 case CLKCFG_FSB_533: 206 case CLKCFG_FSB_533:
207 return 133; 207 return 133;
208 case CLKCFG_FSB_667: 208 case CLKCFG_FSB_667:
209 return 166; 209 return 166;
210 case CLKCFG_FSB_800: 210 case CLKCFG_FSB_800:
211 return 200; 211 return 200;
212 case CLKCFG_FSB_1067: 212 case CLKCFG_FSB_1067:
213 return 266; 213 return 266;
214 case CLKCFG_FSB_1333: 214 case CLKCFG_FSB_1333:
215 return 333; 215 return 333;
216 /* these two are just a guess; one of them might be right */ 216 /* these two are just a guess; one of them might be right */
217 case CLKCFG_FSB_1600: 217 case CLKCFG_FSB_1600:
218 case CLKCFG_FSB_1600_ALT: 218 case CLKCFG_FSB_1600_ALT:
219 return 400; 219 return 400;
220 default: 220 default:
221 return 133; 221 return 133;
222 } 222 }
223} 223}
224 224
225static void intel_update_czclk(struct drm_i915_private *dev_priv) 225static void intel_update_czclk(struct drm_i915_private *dev_priv)
226{ 226{
227 if (!IS_VALLEYVIEW(dev_priv)) 227 if (!IS_VALLEYVIEW(dev_priv))
228 return; 228 return;
229 229
230 dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk", 230 dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
231 CCK_CZ_CLOCK_CONTROL); 231 CCK_CZ_CLOCK_CONTROL);
232 232
233 DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq); 233 DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
234} 234}
235 235
236static inline u32 /* units of 100MHz */ 236static inline u32 /* units of 100MHz */
237intel_fdi_link_freq(struct drm_device *dev) 237intel_fdi_link_freq(struct drm_device *dev)
238{ 238{
239 if (IS_GEN5(dev)) { 239 if (IS_GEN5(dev)) {
240 struct drm_i915_private *dev_priv = dev->dev_private; 240 struct drm_i915_private *dev_priv = dev->dev_private;
241 return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2; 241 return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
242 } else 242 } else
243 return 27; 243 return 27;
244} 244}
245 245
246static const intel_limit_t intel_limits_i8xx_dac = { 246static const intel_limit_t intel_limits_i8xx_dac = {
247 .dot = { .min = 25000, .max = 350000 }, 247 .dot = { .min = 25000, .max = 350000 },
248 .vco = { .min = 908000, .max = 1512000 }, 248 .vco = { .min = 908000, .max = 1512000 },
249 .n = { .min = 2, .max = 16 }, 249 .n = { .min = 2, .max = 16 },
250 .m = { .min = 96, .max = 140 }, 250 .m = { .min = 96, .max = 140 },
251 .m1 = { .min = 18, .max = 26 }, 251 .m1 = { .min = 18, .max = 26 },
252 .m2 = { .min = 6, .max = 16 }, 252 .m2 = { .min = 6, .max = 16 },
253 .p = { .min = 4, .max = 128 }, 253 .p = { .min = 4, .max = 128 },
254 .p1 = { .min = 2, .max = 33 }, 254 .p1 = { .min = 2, .max = 33 },
255 .p2 = { .dot_limit = 165000, 255 .p2 = { .dot_limit = 165000,
256 .p2_slow = 4, .p2_fast = 2 }, 256 .p2_slow = 4, .p2_fast = 2 },
257}; 257};
258 258
259static const intel_limit_t intel_limits_i8xx_dvo = { 259static const intel_limit_t intel_limits_i8xx_dvo = {
260 .dot = { .min = 25000, .max = 350000 }, 260 .dot = { .min = 25000, .max = 350000 },
261 .vco = { .min = 908000, .max = 1512000 }, 261 .vco = { .min = 908000, .max = 1512000 },
262 .n = { .min = 2, .max = 16 }, 262 .n = { .min = 2, .max = 16 },
263 .m = { .min = 96, .max = 140 }, 263 .m = { .min = 96, .max = 140 },
264 .m1 = { .min = 18, .max = 26 }, 264 .m1 = { .min = 18, .max = 26 },
265 .m2 = { .min = 6, .max = 16 }, 265 .m2 = { .min = 6, .max = 16 },
266 .p = { .min = 4, .max = 128 }, 266 .p = { .min = 4, .max = 128 },
267 .p1 = { .min = 2, .max = 33 }, 267 .p1 = { .min = 2, .max = 33 },
268 .p2 = { .dot_limit = 165000, 268 .p2 = { .dot_limit = 165000,
269 .p2_slow = 4, .p2_fast = 4 }, 269 .p2_slow = 4, .p2_fast = 4 },
270}; 270};
271 271
272static const intel_limit_t intel_limits_i8xx_lvds = { 272static const intel_limit_t intel_limits_i8xx_lvds = {
273 .dot = { .min = 25000, .max = 350000 }, 273 .dot = { .min = 25000, .max = 350000 },
274 .vco = { .min = 908000, .max = 1512000 }, 274 .vco = { .min = 908000, .max = 1512000 },
275 .n = { .min = 2, .max = 16 }, 275 .n = { .min = 2, .max = 16 },
276 .m = { .min = 96, .max = 140 }, 276 .m = { .min = 96, .max = 140 },
277 .m1 = { .min = 18, .max = 26 }, 277 .m1 = { .min = 18, .max = 26 },
278 .m2 = { .min = 6, .max = 16 }, 278 .m2 = { .min = 6, .max = 16 },
279 .p = { .min = 4, .max = 128 }, 279 .p = { .min = 4, .max = 128 },
280 .p1 = { .min = 1, .max = 6 }, 280 .p1 = { .min = 1, .max = 6 },
281 .p2 = { .dot_limit = 165000, 281 .p2 = { .dot_limit = 165000,
282 .p2_slow = 14, .p2_fast = 7 }, 282 .p2_slow = 14, .p2_fast = 7 },
283}; 283};
284 284
285static const intel_limit_t intel_limits_i9xx_sdvo = { 285static const intel_limit_t intel_limits_i9xx_sdvo = {
286 .dot = { .min = 20000, .max = 400000 }, 286 .dot = { .min = 20000, .max = 400000 },
287 .vco = { .min = 1400000, .max = 2800000 }, 287 .vco = { .min = 1400000, .max = 2800000 },
288 .n = { .min = 1, .max = 6 }, 288 .n = { .min = 1, .max = 6 },
289 .m = { .min = 70, .max = 120 }, 289 .m = { .min = 70, .max = 120 },
290 .m1 = { .min = 8, .max = 18 }, 290 .m1 = { .min = 8, .max = 18 },
291 .m2 = { .min = 3, .max = 7 }, 291 .m2 = { .min = 3, .max = 7 },
292 .p = { .min = 5, .max = 80 }, 292 .p = { .min = 5, .max = 80 },
293 .p1 = { .min = 1, .max = 8 }, 293 .p1 = { .min = 1, .max = 8 },
294 .p2 = { .dot_limit = 200000, 294 .p2 = { .dot_limit = 200000,
295 .p2_slow = 10, .p2_fast = 5 }, 295 .p2_slow = 10, .p2_fast = 5 },
296}; 296};
297 297
298static const intel_limit_t intel_limits_i9xx_lvds = { 298static const intel_limit_t intel_limits_i9xx_lvds = {
299 .dot = { .min = 20000, .max = 400000 }, 299 .dot = { .min = 20000, .max = 400000 },
300 .vco = { .min = 1400000, .max = 2800000 }, 300 .vco = { .min = 1400000, .max = 2800000 },
301 .n = { .min = 1, .max = 6 }, 301 .n = { .min = 1, .max = 6 },
302 .m = { .min = 70, .max = 120 }, 302 .m = { .min = 70, .max = 120 },
303 .m1 = { .min = 8, .max = 18 }, 303 .m1 = { .min = 8, .max = 18 },
304 .m2 = { .min = 3, .max = 7 }, 304 .m2 = { .min = 3, .max = 7 },
305 .p = { .min = 7, .max = 98 }, 305 .p = { .min = 7, .max = 98 },
306 .p1 = { .min = 1, .max = 8 }, 306 .p1 = { .min = 1, .max = 8 },
307 .p2 = { .dot_limit = 112000, 307 .p2 = { .dot_limit = 112000,
308 .p2_slow = 14, .p2_fast = 7 }, 308 .p2_slow = 14, .p2_fast = 7 },
309}; 309};
310 310
311 311
312static const intel_limit_t intel_limits_g4x_sdvo = { 312static const intel_limit_t intel_limits_g4x_sdvo = {
313 .dot = { .min = 25000, .max = 270000 }, 313 .dot = { .min = 25000, .max = 270000 },
314 .vco = { .min = 1750000, .max = 3500000}, 314 .vco = { .min = 1750000, .max = 3500000},
315 .n = { .min = 1, .max = 4 }, 315 .n = { .min = 1, .max = 4 },
316 .m = { .min = 104, .max = 138 }, 316 .m = { .min = 104, .max = 138 },
317 .m1 = { .min = 17, .max = 23 }, 317 .m1 = { .min = 17, .max = 23 },
318 .m2 = { .min = 5, .max = 11 }, 318 .m2 = { .min = 5, .max = 11 },
319 .p = { .min = 10, .max = 30 }, 319 .p = { .min = 10, .max = 30 },
320 .p1 = { .min = 1, .max = 3}, 320 .p1 = { .min = 1, .max = 3},
321 .p2 = { .dot_limit = 270000, 321 .p2 = { .dot_limit = 270000,
322 .p2_slow = 10, 322 .p2_slow = 10,
323 .p2_fast = 10 323 .p2_fast = 10
324 }, 324 },
325}; 325};
326 326
327static const intel_limit_t intel_limits_g4x_hdmi = { 327static const intel_limit_t intel_limits_g4x_hdmi = {
328 .dot = { .min = 22000, .max = 400000 }, 328 .dot = { .min = 22000, .max = 400000 },
329 .vco = { .min = 1750000, .max = 3500000}, 329 .vco = { .min = 1750000, .max = 3500000},
330 .n = { .min = 1, .max = 4 }, 330 .n = { .min = 1, .max = 4 },
331 .m = { .min = 104, .max = 138 }, 331 .m = { .min = 104, .max = 138 },
332 .m1 = { .min = 16, .max = 23 }, 332 .m1 = { .min = 16, .max = 23 },
333 .m2 = { .min = 5, .max = 11 }, 333 .m2 = { .min = 5, .max = 11 },
334 .p = { .min = 5, .max = 80 }, 334 .p = { .min = 5, .max = 80 },
335 .p1 = { .min = 1, .max = 8}, 335 .p1 = { .min = 1, .max = 8},
336 .p2 = { .dot_limit = 165000, 336 .p2 = { .dot_limit = 165000,
337 .p2_slow = 10, .p2_fast = 5 }, 337 .p2_slow = 10, .p2_fast = 5 },
338}; 338};
339 339
340static const intel_limit_t intel_limits_g4x_single_channel_lvds = { 340static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
341 .dot = { .min = 20000, .max = 115000 }, 341 .dot = { .min = 20000, .max = 115000 },
342 .vco = { .min = 1750000, .max = 3500000 }, 342 .vco = { .min = 1750000, .max = 3500000 },
343 .n = { .min = 1, .max = 3 }, 343 .n = { .min = 1, .max = 3 },
344 .m = { .min = 104, .max = 138 }, 344 .m = { .min = 104, .max = 138 },
345 .m1 = { .min = 17, .max = 23 }, 345 .m1 = { .min = 17, .max = 23 },
346 .m2 = { .min = 5, .max = 11 }, 346 .m2 = { .min = 5, .max = 11 },
347 .p = { .min = 28, .max = 112 }, 347 .p = { .min = 28, .max = 112 },
348 .p1 = { .min = 2, .max = 8 }, 348 .p1 = { .min = 2, .max = 8 },
349 .p2 = { .dot_limit = 0, 349 .p2 = { .dot_limit = 0,
350 .p2_slow = 14, .p2_fast = 14 350 .p2_slow = 14, .p2_fast = 14
351 }, 351 },
352}; 352};
353 353
354static const intel_limit_t intel_limits_g4x_dual_channel_lvds = { 354static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
355 .dot = { .min = 80000, .max = 224000 }, 355 .dot = { .min = 80000, .max = 224000 },
356 .vco = { .min = 1750000, .max = 3500000 }, 356 .vco = { .min = 1750000, .max = 3500000 },
357 .n = { .min = 1, .max = 3 }, 357 .n = { .min = 1, .max = 3 },
358 .m = { .min = 104, .max = 138 }, 358 .m = { .min = 104, .max = 138 },
359 .m1 = { .min = 17, .max = 23 }, 359 .m1 = { .min = 17, .max = 23 },
360 .m2 = { .min = 5, .max = 11 }, 360 .m2 = { .min = 5, .max = 11 },
361 .p = { .min = 14, .max = 42 }, 361 .p = { .min = 14, .max = 42 },
362 .p1 = { .min = 2, .max = 6 }, 362 .p1 = { .min = 2, .max = 6 },
363 .p2 = { .dot_limit = 0, 363 .p2 = { .dot_limit = 0,
364 .p2_slow = 7, .p2_fast = 7 364 .p2_slow = 7, .p2_fast = 7
365 }, 365 },
366}; 366};
367 367
368static const intel_limit_t intel_limits_pineview_sdvo = { 368static const intel_limit_t intel_limits_pineview_sdvo = {
369 .dot = { .min = 20000, .max = 400000}, 369 .dot = { .min = 20000, .max = 400000},
370 .vco = { .min = 1700000, .max = 3500000 }, 370 .vco = { .min = 1700000, .max = 3500000 },
371 /* Pineview's Ncounter is a ring counter */ 371 /* Pineview's Ncounter is a ring counter */
372 .n = { .min = 3, .max = 6 }, 372 .n = { .min = 3, .max = 6 },
373 .m = { .min = 2, .max = 256 }, 373 .m = { .min = 2, .max = 256 },
374 /* Pineview only has one combined m divider, which we treat as m2. */ 374 /* Pineview only has one combined m divider, which we treat as m2. */
375 .m1 = { .min = 0, .max = 0 }, 375 .m1 = { .min = 0, .max = 0 },
376 .m2 = { .min = 0, .max = 254 }, 376 .m2 = { .min = 0, .max = 254 },
377 .p = { .min = 5, .max = 80 }, 377 .p = { .min = 5, .max = 80 },
378 .p1 = { .min = 1, .max = 8 }, 378 .p1 = { .min = 1, .max = 8 },
379 .p2 = { .dot_limit = 200000, 379 .p2 = { .dot_limit = 200000,
380 .p2_slow = 10, .p2_fast = 5 }, 380 .p2_slow = 10, .p2_fast = 5 },
381}; 381};
382 382
383static const intel_limit_t intel_limits_pineview_lvds = { 383static const intel_limit_t intel_limits_pineview_lvds = {
384 .dot = { .min = 20000, .max = 400000 }, 384 .dot = { .min = 20000, .max = 400000 },
385 .vco = { .min = 1700000, .max = 3500000 }, 385 .vco = { .min = 1700000, .max = 3500000 },
386 .n = { .min = 3, .max = 6 }, 386 .n = { .min = 3, .max = 6 },
387 .m = { .min = 2, .max = 256 }, 387 .m = { .min = 2, .max = 256 },
388 .m1 = { .min = 0, .max = 0 }, 388 .m1 = { .min = 0, .max = 0 },
389 .m2 = { .min = 0, .max = 254 }, 389 .m2 = { .min = 0, .max = 254 },
390 .p = { .min = 7, .max = 112 }, 390 .p = { .min = 7, .max = 112 },
391 .p1 = { .min = 1, .max = 8 }, 391 .p1 = { .min = 1, .max = 8 },
392 .p2 = { .dot_limit = 112000, 392 .p2 = { .dot_limit = 112000,
393 .p2_slow = 14, .p2_fast = 14 }, 393 .p2_slow = 14, .p2_fast = 14 },
394}; 394};
395 395
396/* Ironlake / Sandybridge 396/* Ironlake / Sandybridge
397 * 397 *
398 * We calculate clock using (register_value + 2) for N/M1/M2, so here 398 * We calculate clock using (register_value + 2) for N/M1/M2, so here
399 * the range value for them is (actual_value - 2). 399 * the range value for them is (actual_value - 2).
400 */ 400 */
401static const intel_limit_t intel_limits_ironlake_dac = { 401static const intel_limit_t intel_limits_ironlake_dac = {
402 .dot = { .min = 25000, .max = 350000 }, 402 .dot = { .min = 25000, .max = 350000 },
403 .vco = { .min = 1760000, .max = 3510000 }, 403 .vco = { .min = 1760000, .max = 3510000 },
404 .n = { .min = 1, .max = 5 }, 404 .n = { .min = 1, .max = 5 },
405 .m = { .min = 79, .max = 127 }, 405 .m = { .min = 79, .max = 127 },
406 .m1 = { .min = 12, .max = 22 }, 406 .m1 = { .min = 12, .max = 22 },
407 .m2 = { .min = 5, .max = 9 }, 407 .m2 = { .min = 5, .max = 9 },
408 .p = { .min = 5, .max = 80 }, 408 .p = { .min = 5, .max = 80 },
409 .p1 = { .min = 1, .max = 8 }, 409 .p1 = { .min = 1, .max = 8 },
410 .p2 = { .dot_limit = 225000, 410 .p2 = { .dot_limit = 225000,
411 .p2_slow = 10, .p2_fast = 5 }, 411 .p2_slow = 10, .p2_fast = 5 },
412}; 412};
413 413
414static const intel_limit_t intel_limits_ironlake_single_lvds = { 414static const intel_limit_t intel_limits_ironlake_single_lvds = {
415 .dot = { .min = 25000, .max = 350000 }, 415 .dot = { .min = 25000, .max = 350000 },
416 .vco = { .min = 1760000, .max = 3510000 }, 416 .vco = { .min = 1760000, .max = 3510000 },
417 .n = { .min = 1, .max = 3 }, 417 .n = { .min = 1, .max = 3 },
418 .m = { .min = 79, .max = 118 }, 418 .m = { .min = 79, .max = 118 },
419 .m1 = { .min = 12, .max = 22 }, 419 .m1 = { .min = 12, .max = 22 },
420 .m2 = { .min = 5, .max = 9 }, 420 .m2 = { .min = 5, .max = 9 },
421 .p = { .min = 28, .max = 112 }, 421 .p = { .min = 28, .max = 112 },
422 .p1 = { .min = 2, .max = 8 }, 422 .p1 = { .min = 2, .max = 8 },
423 .p2 = { .dot_limit = 225000, 423 .p2 = { .dot_limit = 225000,
424 .p2_slow = 14, .p2_fast = 14 }, 424 .p2_slow = 14, .p2_fast = 14 },
425}; 425};
426 426
427static const intel_limit_t intel_limits_ironlake_dual_lvds = { 427static const intel_limit_t intel_limits_ironlake_dual_lvds = {
428 .dot = { .min = 25000, .max = 350000 }, 428 .dot = { .min = 25000, .max = 350000 },
429 .vco = { .min = 1760000, .max = 3510000 }, 429 .vco = { .min = 1760000, .max = 3510000 },
430 .n = { .min = 1, .max = 3 }, 430 .n = { .min = 1, .max = 3 },
431 .m = { .min = 79, .max = 127 }, 431 .m = { .min = 79, .max = 127 },
432 .m1 = { .min = 12, .max = 22 }, 432 .m1 = { .min = 12, .max = 22 },
433 .m2 = { .min = 5, .max = 9 }, 433 .m2 = { .min = 5, .max = 9 },
434 .p = { .min = 14, .max = 56 }, 434 .p = { .min = 14, .max = 56 },
435 .p1 = { .min = 2, .max = 8 }, 435 .p1 = { .min = 2, .max = 8 },
436 .p2 = { .dot_limit = 225000, 436 .p2 = { .dot_limit = 225000,
437 .p2_slow = 7, .p2_fast = 7 }, 437 .p2_slow = 7, .p2_fast = 7 },
438}; 438};
439 439
440/* LVDS 100mhz refclk limits. */ 440/* LVDS 100mhz refclk limits. */
441static const intel_limit_t intel_limits_ironlake_single_lvds_100m = { 441static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
442 .dot = { .min = 25000, .max = 350000 }, 442 .dot = { .min = 25000, .max = 350000 },
443 .vco = { .min = 1760000, .max = 3510000 }, 443 .vco = { .min = 1760000, .max = 3510000 },
444 .n = { .min = 1, .max = 2 }, 444 .n = { .min = 1, .max = 2 },
445 .m = { .min = 79, .max = 126 }, 445 .m = { .min = 79, .max = 126 },
446 .m1 = { .min = 12, .max = 22 }, 446 .m1 = { .min = 12, .max = 22 },
447 .m2 = { .min = 5, .max = 9 }, 447 .m2 = { .min = 5, .max = 9 },
448 .p = { .min = 28, .max = 112 }, 448 .p = { .min = 28, .max = 112 },
449 .p1 = { .min = 2, .max = 8 }, 449 .p1 = { .min = 2, .max = 8 },
450 .p2 = { .dot_limit = 225000, 450 .p2 = { .dot_limit = 225000,
451 .p2_slow = 14, .p2_fast = 14 }, 451 .p2_slow = 14, .p2_fast = 14 },
452}; 452};
453 453
454static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = { 454static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
455 .dot = { .min = 25000, .max = 350000 }, 455 .dot = { .min = 25000, .max = 350000 },
456 .vco = { .min = 1760000, .max = 3510000 }, 456 .vco = { .min = 1760000, .max = 3510000 },
457 .n = { .min = 1, .max = 3 }, 457 .n = { .min = 1, .max = 3 },
458 .m = { .min = 79, .max = 126 }, 458 .m = { .min = 79, .max = 126 },
459 .m1 = { .min = 12, .max = 22 }, 459 .m1 = { .min = 12, .max = 22 },
460 .m2 = { .min = 5, .max = 9 }, 460 .m2 = { .min = 5, .max = 9 },
461 .p = { .min = 14, .max = 42 }, 461 .p = { .min = 14, .max = 42 },
462 .p1 = { .min = 2, .max = 6 }, 462 .p1 = { .min = 2, .max = 6 },
463 .p2 = { .dot_limit = 225000, 463 .p2 = { .dot_limit = 225000,
464 .p2_slow = 7, .p2_fast = 7 }, 464 .p2_slow = 7, .p2_fast = 7 },
465}; 465};
466 466
467static const intel_limit_t intel_limits_vlv = { 467static const intel_limit_t intel_limits_vlv = {
468 /* 468 /*
469 * These are the data rate limits (measured in fast clocks) 469 * These are the data rate limits (measured in fast clocks)
470 * since those are the strictest limits we have. The fast 470 * since those are the strictest limits we have. The fast
471 * clock and actual rate limits are more relaxed, so checking 471 * clock and actual rate limits are more relaxed, so checking
472 * them would make no difference. 472 * them would make no difference.
473 */ 473 */
474 .dot = { .min = 25000 * 5, .max = 270000 * 5 }, 474 .dot = { .min = 25000 * 5, .max = 270000 * 5 },
475 .vco = { .min = 4000000, .max = 6000000 }, 475 .vco = { .min = 4000000, .max = 6000000 },
476 .n = { .min = 1, .max = 7 }, 476 .n = { .min = 1, .max = 7 },
477 .m1 = { .min = 2, .max = 3 }, 477 .m1 = { .min = 2, .max = 3 },
478 .m2 = { .min = 11, .max = 156 }, 478 .m2 = { .min = 11, .max = 156 },
479 .p1 = { .min = 2, .max = 3 }, 479 .p1 = { .min = 2, .max = 3 },
480 .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */ 480 .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
481}; 481};
482 482
483static const intel_limit_t intel_limits_chv = { 483static const intel_limit_t intel_limits_chv = {
484 /* 484 /*
485 * These are the data rate limits (measured in fast clocks) 485 * These are the data rate limits (measured in fast clocks)
486 * since those are the strictest limits we have. The fast 486 * since those are the strictest limits we have. The fast
487 * clock and actual rate limits are more relaxed, so checking 487 * clock and actual rate limits are more relaxed, so checking
488 * them would make no difference. 488 * them would make no difference.
489 */ 489 */
490 .dot = { .min = 25000 * 5, .max = 540000 * 5}, 490 .dot = { .min = 25000 * 5, .max = 540000 * 5},
491 .vco = { .min = 4800000, .max = 6480000 }, 491 .vco = { .min = 4800000, .max = 6480000 },
492 .n = { .min = 1, .max = 1 }, 492 .n = { .min = 1, .max = 1 },
493 .m1 = { .min = 2, .max = 2 }, 493 .m1 = { .min = 2, .max = 2 },
494 .m2 = { .min = 24 << 22, .max = 175 << 22 }, 494 .m2 = { .min = 24 << 22, .max = 175 << 22 },
495 .p1 = { .min = 2, .max = 4 }, 495 .p1 = { .min = 2, .max = 4 },
496 .p2 = { .p2_slow = 1, .p2_fast = 14 }, 496 .p2 = { .p2_slow = 1, .p2_fast = 14 },
497}; 497};
498 498
499static const intel_limit_t intel_limits_bxt = { 499static const intel_limit_t intel_limits_bxt = {
500 /* FIXME: find real dot limits */ 500 /* FIXME: find real dot limits */
501 .dot = { .min = 0, .max = INT_MAX }, 501 .dot = { .min = 0, .max = INT_MAX },
502 .vco = { .min = 4800000, .max = 6700000 }, 502 .vco = { .min = 4800000, .max = 6700000 },
503 .n = { .min = 1, .max = 1 }, 503 .n = { .min = 1, .max = 1 },
504 .m1 = { .min = 2, .max = 2 }, 504 .m1 = { .min = 2, .max = 2 },
505 /* FIXME: find real m2 limits */ 505 /* FIXME: find real m2 limits */
506 .m2 = { .min = 2 << 22, .max = 255 << 22 }, 506 .m2 = { .min = 2 << 22, .max = 255 << 22 },
507 .p1 = { .min = 2, .max = 4 }, 507 .p1 = { .min = 2, .max = 4 },
508 .p2 = { .p2_slow = 1, .p2_fast = 20 }, 508 .p2 = { .p2_slow = 1, .p2_fast = 20 },
509}; 509};
510 510
511static bool 511static bool
512needs_modeset(struct drm_crtc_state *state) 512needs_modeset(struct drm_crtc_state *state)
513{ 513{
514 return drm_atomic_crtc_needs_modeset(state); 514 return drm_atomic_crtc_needs_modeset(state);
515} 515}
516 516
517/** 517/**
518 * Returns whether any output on the specified pipe is of the specified type 518 * Returns whether any output on the specified pipe is of the specified type
519 */ 519 */
520bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type) 520bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type)
521{ 521{
522 struct drm_device *dev = crtc->base.dev; 522 struct drm_device *dev = crtc->base.dev;
523 struct intel_encoder *encoder; 523 struct intel_encoder *encoder;
524 524
525 for_each_encoder_on_crtc(dev, &crtc->base, encoder) 525 for_each_encoder_on_crtc(dev, &crtc->base, encoder)
526 if (encoder->type == type) 526 if (encoder->type == type)
527 return true; 527 return true;
528 528
529 return false; 529 return false;
530} 530}
531 531
532/** 532/**
533 * Returns whether any output on the specified pipe will have the specified 533 * Returns whether any output on the specified pipe will have the specified
534 * type after a staged modeset is complete, i.e., the same as 534 * type after a staged modeset is complete, i.e., the same as
535 * intel_pipe_has_type() but looking at encoder->new_crtc instead of 535 * intel_pipe_has_type() but looking at encoder->new_crtc instead of
536 * encoder->crtc. 536 * encoder->crtc.
537 */ 537 */
538static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state, 538static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state,
539 int type) 539 int type)
540{ 540{
541 struct drm_atomic_state *state = crtc_state->base.state; 541 struct drm_atomic_state *state = crtc_state->base.state;
542 struct drm_connector *connector; 542 struct drm_connector *connector;
543 struct drm_connector_state *connector_state; 543 struct drm_connector_state *connector_state;
544 struct intel_encoder *encoder; 544 struct intel_encoder *encoder;
545 int i, num_connectors = 0; 545 int i, num_connectors = 0;
546 546
547 for_each_connector_in_state(state, connector, connector_state, i) { 547 for_each_connector_in_state(state, connector, connector_state, i) {
548 if (connector_state->crtc != crtc_state->base.crtc) 548 if (connector_state->crtc != crtc_state->base.crtc)
549 continue; 549 continue;
550 550
551 num_connectors++; 551 num_connectors++;
552 552
553 encoder = to_intel_encoder(connector_state->best_encoder); 553 encoder = to_intel_encoder(connector_state->best_encoder);
554 if (encoder->type == type) 554 if (encoder->type == type)
555 return true; 555 return true;
556 } 556 }
557 557
558 WARN_ON(num_connectors == 0); 558 WARN_ON(num_connectors == 0);
559 559
560 return false; 560 return false;
561} 561}
562 562
563static const intel_limit_t * 563static const intel_limit_t *
564intel_ironlake_limit(struct intel_crtc_state *crtc_state, int refclk) 564intel_ironlake_limit(struct intel_crtc_state *crtc_state, int refclk)
565{ 565{
566 struct drm_device *dev = crtc_state->base.crtc->dev; 566 struct drm_device *dev = crtc_state->base.crtc->dev;
567 const intel_limit_t *limit; 567 const intel_limit_t *limit;
568 568
569 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { 569 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
570 if (intel_is_dual_link_lvds(dev)) { 570 if (intel_is_dual_link_lvds(dev)) {
571 if (refclk == 100000) 571 if (refclk == 100000)
572 limit = &intel_limits_ironlake_dual_lvds_100m; 572 limit = &intel_limits_ironlake_dual_lvds_100m;
573 else 573 else
574 limit = &intel_limits_ironlake_dual_lvds; 574 limit = &intel_limits_ironlake_dual_lvds;
575 } else { 575 } else {
576 if (refclk == 100000) 576 if (refclk == 100000)
577 limit = &intel_limits_ironlake_single_lvds_100m; 577 limit = &intel_limits_ironlake_single_lvds_100m;
578 else 578 else
579 limit = &intel_limits_ironlake_single_lvds; 579 limit = &intel_limits_ironlake_single_lvds;
580 } 580 }
581 } else 581 } else
582 limit = &intel_limits_ironlake_dac; 582 limit = &intel_limits_ironlake_dac;
583 583
584 return limit; 584 return limit;
585} 585}
586 586
587static const intel_limit_t * 587static const intel_limit_t *
588intel_g4x_limit(struct intel_crtc_state *crtc_state) 588intel_g4x_limit(struct intel_crtc_state *crtc_state)
589{ 589{
590 struct drm_device *dev = crtc_state->base.crtc->dev; 590 struct drm_device *dev = crtc_state->base.crtc->dev;
591 const intel_limit_t *limit; 591 const intel_limit_t *limit;
592 592
593 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { 593 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
594 if (intel_is_dual_link_lvds(dev)) 594 if (intel_is_dual_link_lvds(dev))
595 limit = &intel_limits_g4x_dual_channel_lvds; 595 limit = &intel_limits_g4x_dual_channel_lvds;
596 else 596 else
597 limit = &intel_limits_g4x_single_channel_lvds; 597 limit = &intel_limits_g4x_single_channel_lvds;
598 } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) || 598 } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) ||
599 intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) { 599 intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
600 limit = &intel_limits_g4x_hdmi; 600 limit = &intel_limits_g4x_hdmi;
601 } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) { 601 } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) {
602 limit = &intel_limits_g4x_sdvo; 602 limit = &intel_limits_g4x_sdvo;
603 } else /* The option is for other outputs */ 603 } else /* The option is for other outputs */
604 limit = &intel_limits_i9xx_sdvo; 604 limit = &intel_limits_i9xx_sdvo;
605 605
606 return limit; 606 return limit;
607} 607}
608 608
609static const intel_limit_t * 609static const intel_limit_t *
610intel_limit(struct intel_crtc_state *crtc_state, int refclk) 610intel_limit(struct intel_crtc_state *crtc_state, int refclk)
611{ 611{
612 struct drm_device *dev = crtc_state->base.crtc->dev; 612 struct drm_device *dev = crtc_state->base.crtc->dev;
613 const intel_limit_t *limit; 613 const intel_limit_t *limit;
614 614
615 if (IS_BROXTON(dev)) 615 if (IS_BROXTON(dev))
616 limit = &intel_limits_bxt; 616 limit = &intel_limits_bxt;
617 else if (HAS_PCH_SPLIT(dev)) 617 else if (HAS_PCH_SPLIT(dev))
618 limit = intel_ironlake_limit(crtc_state, refclk); 618 limit = intel_ironlake_limit(crtc_state, refclk);
619 else if (IS_G4X(dev)) { 619 else if (IS_G4X(dev)) {
620 limit = intel_g4x_limit(crtc_state); 620 limit = intel_g4x_limit(crtc_state);
621 } else if (IS_PINEVIEW(dev)) { 621 } else if (IS_PINEVIEW(dev)) {
622 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) 622 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
623 limit = &intel_limits_pineview_lvds; 623 limit = &intel_limits_pineview_lvds;
624 else 624 else
625 limit = &intel_limits_pineview_sdvo; 625 limit = &intel_limits_pineview_sdvo;
626 } else if (IS_CHERRYVIEW(dev)) { 626 } else if (IS_CHERRYVIEW(dev)) {
627 limit = &intel_limits_chv; 627 limit = &intel_limits_chv;
628 } else if (IS_VALLEYVIEW(dev)) { 628 } else if (IS_VALLEYVIEW(dev)) {
629 limit = &intel_limits_vlv; 629 limit = &intel_limits_vlv;
630 } else if (!IS_GEN2(dev)) { 630 } else if (!IS_GEN2(dev)) {
631 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) 631 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
632 limit = &intel_limits_i9xx_lvds; 632 limit = &intel_limits_i9xx_lvds;
633 else 633 else
634 limit = &intel_limits_i9xx_sdvo; 634 limit = &intel_limits_i9xx_sdvo;
635 } else { 635 } else {
636 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) 636 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
637 limit = &intel_limits_i8xx_lvds; 637 limit = &intel_limits_i8xx_lvds;
638 else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO)) 638 else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
639 limit = &intel_limits_i8xx_dvo; 639 limit = &intel_limits_i8xx_dvo;
640 else 640 else
641 limit = &intel_limits_i8xx_dac; 641 limit = &intel_limits_i8xx_dac;
642 } 642 }
643 return limit; 643 return limit;
644} 644}
645 645
646/* 646/*
647 * Platform specific helpers to calculate the port PLL loopback- (clock.m), 647 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
648 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast 648 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
649 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic. 649 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
650 * The helpers' return value is the rate of the clock that is fed to the 650 * The helpers' return value is the rate of the clock that is fed to the
651 * display engine's pipe which can be the above fast dot clock rate or a 651 * display engine's pipe which can be the above fast dot clock rate or a
652 * divided-down version of it. 652 * divided-down version of it.
653 */ 653 */
654/* m1 is reserved as 0 in Pineview, n is a ring counter */ 654/* m1 is reserved as 0 in Pineview, n is a ring counter */
655static int pnv_calc_dpll_params(int refclk, intel_clock_t *clock) 655static int pnv_calc_dpll_params(int refclk, intel_clock_t *clock)
656{ 656{
657 clock->m = clock->m2 + 2; 657 clock->m = clock->m2 + 2;
658 clock->p = clock->p1 * clock->p2; 658 clock->p = clock->p1 * clock->p2;
659 if (WARN_ON(clock->n == 0 || clock->p == 0)) 659 if (WARN_ON(clock->n == 0 || clock->p == 0))
660 return 0; 660 return 0;
661 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n); 661 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
662 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); 662 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
663 663
664 return clock->dot; 664 return clock->dot;
665} 665}
666 666
667static uint32_t i9xx_dpll_compute_m(struct dpll *dpll) 667static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
668{ 668{
669 return 5 * (dpll->m1 + 2) + (dpll->m2 + 2); 669 return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
670} 670}
671 671
672static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock) 672static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock)
673{ 673{
674 clock->m = i9xx_dpll_compute_m(clock); 674 clock->m = i9xx_dpll_compute_m(clock);
675 clock->p = clock->p1 * clock->p2; 675 clock->p = clock->p1 * clock->p2;
676 if (WARN_ON(clock->n + 2 == 0 || clock->p == 0)) 676 if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
677 return 0; 677 return 0;
678 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2); 678 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
679 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); 679 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
680 680
681 return clock->dot; 681 return clock->dot;
682} 682}
683 683
684static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock) 684static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock)
685{ 685{
686 clock->m = clock->m1 * clock->m2; 686 clock->m = clock->m1 * clock->m2;
687 clock->p = clock->p1 * clock->p2; 687 clock->p = clock->p1 * clock->p2;
688 if (WARN_ON(clock->n == 0 || clock->p == 0)) 688 if (WARN_ON(clock->n == 0 || clock->p == 0))
689 return 0; 689 return 0;
690 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n); 690 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
691 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); 691 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
692 692
693 return clock->dot / 5; 693 return clock->dot / 5;
694} 694}
695 695
696int chv_calc_dpll_params(int refclk, intel_clock_t *clock) 696int chv_calc_dpll_params(int refclk, intel_clock_t *clock)
697{ 697{
698 clock->m = clock->m1 * clock->m2; 698 clock->m = clock->m1 * clock->m2;
699 clock->p = clock->p1 * clock->p2; 699 clock->p = clock->p1 * clock->p2;
700 if (WARN_ON(clock->n == 0 || clock->p == 0)) 700 if (WARN_ON(clock->n == 0 || clock->p == 0))
701 return 0; 701 return 0;
702 clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m, 702 clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
703 clock->n << 22); 703 clock->n << 22);
704 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); 704 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
705 705
706 return clock->dot / 5; 706 return clock->dot / 5;
707} 707}
708 708
709#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) 709#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
710/** 710/**
711 * Returns whether the given set of divisors are valid for a given refclk with 711 * Returns whether the given set of divisors are valid for a given refclk with
712 * the given connectors. 712 * the given connectors.
713 */ 713 */
714 714
715static bool intel_PLL_is_valid(struct drm_device *dev, 715static bool intel_PLL_is_valid(struct drm_device *dev,
716 const intel_limit_t *limit, 716 const intel_limit_t *limit,
717 const intel_clock_t *clock) 717 const intel_clock_t *clock)
718{ 718{
719 if (clock->n < limit->n.min || limit->n.max < clock->n) 719 if (clock->n < limit->n.min || limit->n.max < clock->n)
720 INTELPllInvalid("n out of range\n"); 720 INTELPllInvalid("n out of range\n");
721 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) 721 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
722 INTELPllInvalid("p1 out of range\n"); 722 INTELPllInvalid("p1 out of range\n");
723 if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2) 723 if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
724 INTELPllInvalid("m2 out of range\n"); 724 INTELPllInvalid("m2 out of range\n");
725 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) 725 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
726 INTELPllInvalid("m1 out of range\n"); 726 INTELPllInvalid("m1 out of range\n");
727 727
728 if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev) && !IS_BROXTON(dev)) 728 if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev) && !IS_BROXTON(dev))
729 if (clock->m1 <= clock->m2) 729 if (clock->m1 <= clock->m2)
730 INTELPllInvalid("m1 <= m2\n"); 730 INTELPllInvalid("m1 <= m2\n");
731 731
732 if (!IS_VALLEYVIEW(dev) && !IS_BROXTON(dev)) { 732 if (!IS_VALLEYVIEW(dev) && !IS_BROXTON(dev)) {
733 if (clock->p < limit->p.min || limit->p.max < clock->p) 733 if (clock->p < limit->p.min || limit->p.max < clock->p)
734 INTELPllInvalid("p out of range\n"); 734 INTELPllInvalid("p out of range\n");
735 if (clock->m < limit->m.min || limit->m.max < clock->m) 735 if (clock->m < limit->m.min || limit->m.max < clock->m)
736 INTELPllInvalid("m out of range\n"); 736 INTELPllInvalid("m out of range\n");
737 } 737 }
738 738
739 if (clock->vco < limit->vco.min || limit->vco.max < clock->vco) 739 if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
740 INTELPllInvalid("vco out of range\n"); 740 INTELPllInvalid("vco out of range\n");
741 /* XXX: We may need to be checking "Dot clock" depending on the multiplier, 741 /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
742 * connector, etc., rather than just a single range. 742 * connector, etc., rather than just a single range.
743 */ 743 */
744 if (clock->dot < limit->dot.min || limit->dot.max < clock->dot) 744 if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
745 INTELPllInvalid("dot out of range\n"); 745 INTELPllInvalid("dot out of range\n");
746 746
747 return true; 747 return true;
748} 748}
749 749
750static int 750static int
751i9xx_select_p2_div(const intel_limit_t *limit, 751i9xx_select_p2_div(const intel_limit_t *limit,
752 const struct intel_crtc_state *crtc_state, 752 const struct intel_crtc_state *crtc_state,
753 int target) 753 int target)
754{ 754{
755 struct drm_device *dev = crtc_state->base.crtc->dev; 755 struct drm_device *dev = crtc_state->base.crtc->dev;
756 756
757 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { 757 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
758 /* 758 /*
759 * For LVDS just rely on its current settings for dual-channel. 759 * For LVDS just rely on its current settings for dual-channel.
760 * We haven't figured out how to reliably set up different 760 * We haven't figured out how to reliably set up different
761 * single/dual channel state, if we even can. 761 * single/dual channel state, if we even can.
762 */ 762 */
763 if (intel_is_dual_link_lvds(dev)) 763 if (intel_is_dual_link_lvds(dev))
764 return limit->p2.p2_fast; 764 return limit->p2.p2_fast;
765 else 765 else
766 return limit->p2.p2_slow; 766 return limit->p2.p2_slow;
767 } else { 767 } else {
768 if (target < limit->p2.dot_limit) 768 if (target < limit->p2.dot_limit)
769 return limit->p2.p2_slow; 769 return limit->p2.p2_slow;
770 else 770 else
771 return limit->p2.p2_fast; 771 return limit->p2.p2_fast;
772 } 772 }
773} 773}
774 774
775static bool 775static bool
776i9xx_find_best_dpll(const intel_limit_t *limit, 776i9xx_find_best_dpll(const intel_limit_t *limit,
777 struct intel_crtc_state *crtc_state, 777 struct intel_crtc_state *crtc_state,
778 int target, int refclk, intel_clock_t *match_clock, 778 int target, int refclk, intel_clock_t *match_clock,
779 intel_clock_t *best_clock) 779 intel_clock_t *best_clock)
780{ 780{
781 struct drm_device *dev = crtc_state->base.crtc->dev; 781 struct drm_device *dev = crtc_state->base.crtc->dev;
782 intel_clock_t clock; 782 intel_clock_t clock;
783 int err = target; 783 int err = target;
784 784
785 memset(best_clock, 0, sizeof(*best_clock)); 785 memset(best_clock, 0, sizeof(*best_clock));
786 786
787 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target); 787 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
788 788
789 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; 789 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
790 clock.m1++) { 790 clock.m1++) {
791 for (clock.m2 = limit->m2.min; 791 for (clock.m2 = limit->m2.min;
792 clock.m2 <= limit->m2.max; clock.m2++) { 792 clock.m2 <= limit->m2.max; clock.m2++) {
793 if (clock.m2 >= clock.m1) 793 if (clock.m2 >= clock.m1)
794 break; 794 break;
795 for (clock.n = limit->n.min; 795 for (clock.n = limit->n.min;
796 clock.n <= limit->n.max; clock.n++) { 796 clock.n <= limit->n.max; clock.n++) {
797 for (clock.p1 = limit->p1.min; 797 for (clock.p1 = limit->p1.min;
798 clock.p1 <= limit->p1.max; clock.p1++) { 798 clock.p1 <= limit->p1.max; clock.p1++) {
799 int this_err; 799 int this_err;
800 800
801 i9xx_calc_dpll_params(refclk, &clock); 801 i9xx_calc_dpll_params(refclk, &clock);
802 if (!intel_PLL_is_valid(dev, limit, 802 if (!intel_PLL_is_valid(dev, limit,
803 &clock)) 803 &clock))
804 continue; 804 continue;
805 if (match_clock && 805 if (match_clock &&
806 clock.p != match_clock->p) 806 clock.p != match_clock->p)
807 continue; 807 continue;
808 808
809 this_err = abs(clock.dot - target); 809 this_err = abs(clock.dot - target);
810 if (this_err < err) { 810 if (this_err < err) {
811 *best_clock = clock; 811 *best_clock = clock;
812 err = this_err; 812 err = this_err;
813 } 813 }
814 } 814 }
815 } 815 }
816 } 816 }
817 } 817 }
818 818
819 return (err != target); 819 return (err != target);
820} 820}
821 821
822static bool 822static bool
823pnv_find_best_dpll(const intel_limit_t *limit, 823pnv_find_best_dpll(const intel_limit_t *limit,
824 struct intel_crtc_state *crtc_state, 824 struct intel_crtc_state *crtc_state,
825 int target, int refclk, intel_clock_t *match_clock, 825 int target, int refclk, intel_clock_t *match_clock,
826 intel_clock_t *best_clock) 826 intel_clock_t *best_clock)
827{ 827{
828 struct drm_device *dev = crtc_state->base.crtc->dev; 828 struct drm_device *dev = crtc_state->base.crtc->dev;
829 intel_clock_t clock; 829 intel_clock_t clock;
830 int err = target; 830 int err = target;
831 831
832 memset(best_clock, 0, sizeof(*best_clock)); 832 memset(best_clock, 0, sizeof(*best_clock));
833 833
834 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target); 834 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
835 835
836 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; 836 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
837 clock.m1++) { 837 clock.m1++) {
838 for (clock.m2 = limit->m2.min; 838 for (clock.m2 = limit->m2.min;
839 clock.m2 <= limit->m2.max; clock.m2++) { 839 clock.m2 <= limit->m2.max; clock.m2++) {
840 for (clock.n = limit->n.min; 840 for (clock.n = limit->n.min;
841 clock.n <= limit->n.max; clock.n++) { 841 clock.n <= limit->n.max; clock.n++) {
842 for (clock.p1 = limit->p1.min; 842 for (clock.p1 = limit->p1.min;
843 clock.p1 <= limit->p1.max; clock.p1++) { 843 clock.p1 <= limit->p1.max; clock.p1++) {
844 int this_err; 844 int this_err;
845 845
846 pnv_calc_dpll_params(refclk, &clock); 846 pnv_calc_dpll_params(refclk, &clock);
847 if (!intel_PLL_is_valid(dev, limit, 847 if (!intel_PLL_is_valid(dev, limit,
848 &clock)) 848 &clock))
849 continue; 849 continue;
850 if (match_clock && 850 if (match_clock &&
851 clock.p != match_clock->p) 851 clock.p != match_clock->p)
852 continue; 852 continue;
853 853
854 this_err = abs(clock.dot - target); 854 this_err = abs(clock.dot - target);
855 if (this_err < err) { 855 if (this_err < err) {
856 *best_clock = clock; 856 *best_clock = clock;
857 err = this_err; 857 err = this_err;
858 } 858 }
859 } 859 }
860 } 860 }
861 } 861 }
862 } 862 }
863 863
864 return (err != target); 864 return (err != target);
865} 865}
866 866
867static bool 867static bool
868g4x_find_best_dpll(const intel_limit_t *limit, 868g4x_find_best_dpll(const intel_limit_t *limit,
869 struct intel_crtc_state *crtc_state, 869 struct intel_crtc_state *crtc_state,
870 int target, int refclk, intel_clock_t *match_clock, 870 int target, int refclk, intel_clock_t *match_clock,
871 intel_clock_t *best_clock) 871 intel_clock_t *best_clock)
872{ 872{
873 struct drm_device *dev = crtc_state->base.crtc->dev; 873 struct drm_device *dev = crtc_state->base.crtc->dev;
874 intel_clock_t clock; 874 intel_clock_t clock;
875 int max_n; 875 int max_n;
876 bool found = false; 876 bool found = false;
877 /* approximately equals target * 0.00585 */ 877 /* approximately equals target * 0.00585 */
878 int err_most = (target >> 8) + (target >> 9); 878 int err_most = (target >> 8) + (target >> 9);
879 879
880 memset(best_clock, 0, sizeof(*best_clock)); 880 memset(best_clock, 0, sizeof(*best_clock));
881 881
882 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target); 882 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
883 883
884 max_n = limit->n.max; 884 max_n = limit->n.max;
885 /* based on hardware requirement, prefer smaller n to precision */ 885 /* based on hardware requirement, prefer smaller n to precision */
886 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) { 886 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
887 /* based on hardware requirement, prefer larger m1,m2 */ 887 /* based on hardware requirement, prefer larger m1,m2 */
888 for (clock.m1 = limit->m1.max; 888 for (clock.m1 = limit->m1.max;
889 clock.m1 >= limit->m1.min; clock.m1--) { 889 clock.m1 >= limit->m1.min; clock.m1--) {
890 for (clock.m2 = limit->m2.max; 890 for (clock.m2 = limit->m2.max;
891 clock.m2 >= limit->m2.min; clock.m2--) { 891 clock.m2 >= limit->m2.min; clock.m2--) {
892 for (clock.p1 = limit->p1.max; 892 for (clock.p1 = limit->p1.max;
893 clock.p1 >= limit->p1.min; clock.p1--) { 893 clock.p1 >= limit->p1.min; clock.p1--) {
894 int this_err; 894 int this_err;
895 895
896 i9xx_calc_dpll_params(refclk, &clock); 896 i9xx_calc_dpll_params(refclk, &clock);
897 if (!intel_PLL_is_valid(dev, limit, 897 if (!intel_PLL_is_valid(dev, limit,
898 &clock)) 898 &clock))
899 continue; 899 continue;
900 900
901 this_err = abs(clock.dot - target); 901 this_err = abs(clock.dot - target);
902 if (this_err < err_most) { 902 if (this_err < err_most) {
903 *best_clock = clock; 903 *best_clock = clock;
904 err_most = this_err; 904 err_most = this_err;
905 max_n = clock.n; 905 max_n = clock.n;
906 found = true; 906 found = true;
907 } 907 }
908 } 908 }
909 } 909 }
910 } 910 }
911 } 911 }
912 return found; 912 return found;
913} 913}
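
The starting tolerance in g4x_find_best_dpll() is simple shift arithmetic: (target >> 8) + (target >> 9) = target/256 + target/512 = 3*target/512, roughly 0.586 % of target, which is where the 0.00585 in the comment above comes from. A quick standalone check; the 270000 kHz example value is made up:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	int target = 270000;	/* e.g. a 270 MHz dot clock in kHz (made-up value) */
	int err_most = (target >> 8) + (target >> 9);

	/* 270000/256 + 270000/512 = 1054 + 527 = 1581, vs 270000 * 3 / 512 = 1582 */
	printf("err_most = %d (~%.3f%% of target)\n",
	    err_most, 100.0 * err_most / target);
	assert(err_most <= target * 3 / 512);	/* shifts truncate, so never above the exact ratio */
	return 0;
}
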
914 914
915/* 915/*
916 * Check if the calculated PLL configuration is more optimal compared to the 916 * Check if the calculated PLL configuration is more optimal compared to the
917 * best configuration and error found so far. Return the calculated error. 917 * best configuration and error found so far. Return the calculated error.
918 */ 918 */
919static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq, 919static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
920 const intel_clock_t *calculated_clock, 920 const intel_clock_t *calculated_clock,
921 const intel_clock_t *best_clock, 921 const intel_clock_t *best_clock,
922 unsigned int best_error_ppm, 922 unsigned int best_error_ppm,
923 unsigned int *error_ppm) 923 unsigned int *error_ppm)
924{ 924{
925 /* 925 /*
926 * For CHV ignore the error and consider only the P value. 926 * For CHV ignore the error and consider only the P value.
927 * Prefer a bigger P value based on HW requirements. 927 * Prefer a bigger P value based on HW requirements.
928 */ 928 */
929 if (IS_CHERRYVIEW(dev)) { 929 if (IS_CHERRYVIEW(dev)) {
930 *error_ppm = 0; 930 *error_ppm = 0;
931 931
932 return calculated_clock->p > best_clock->p; 932 return calculated_clock->p > best_clock->p;
933 } 933 }
934 934
935 if (WARN_ON_ONCE(!target_freq)) 935 if (WARN_ON_ONCE(!target_freq))
936 return false; 936 return false;
937 937
938 *error_ppm = div_u64(1000000ULL * 938 *error_ppm = div_u64(1000000ULL *
939 abs(target_freq - calculated_clock->dot), 939 abs(target_freq - calculated_clock->dot),
940 target_freq); 940 target_freq);
941 /* 941 /*
942 * Prefer a better P value over a better (smaller) error if the error 942 * Prefer a better P value over a better (smaller) error if the error
943 * is small. Ensure this preference for future configurations too by 943 * is small. Ensure this preference for future configurations too by
944 * setting the error to 0. 944 * setting the error to 0.
945 */ 945 */
946 if (*error_ppm < 100 && calculated_clock->p > best_clock->p) { 946 if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
947 *error_ppm = 0; 947 *error_ppm = 0;
948 948
949 return true; 949 return true;
950 } 950 }
951 951
952 return *error_ppm + 10 < best_error_ppm; 952 return *error_ppm + 10 < best_error_ppm;
953} 953}
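
vlv_PLL_is_optimal() measures the deviation in parts per million, error_ppm = 10^6 * |target - dot| / target, and accepts a candidate only if it wins by more than 10 ppm, or, once the error is already under 100 ppm, if it offers a larger P divider. A small standalone sketch of that comparison; div_u64() is replaced by ordinary 64-bit division and the function names here are illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

/* deviation between requested and achieved clock, in parts per million */
unsigned int clock_error_ppm(int target, int dot)
{
	return (unsigned int)(1000000ULL * (uint64_t)abs(target - dot) / (uint64_t)target);
}

/* mirrors the non-CHV branch of vlv_PLL_is_optimal() */
bool candidate_is_better(int target, int dot, int p,
    int best_p, unsigned int best_error_ppm, unsigned int *error_ppm)
{
	*error_ppm = clock_error_ppm(target, dot);

	/* error already tiny: prefer the bigger P divider instead */
	if (*error_ppm < 100 && p > best_p) {
		*error_ppm = 0;
		return true;
	}
	/* otherwise demand a clear (>10 ppm) improvement */
	return *error_ppm + 10 < best_error_ppm;
}
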
954 954
955static bool 955static bool
956vlv_find_best_dpll(const intel_limit_t *limit, 956vlv_find_best_dpll(const intel_limit_t *limit,
957 struct intel_crtc_state *crtc_state, 957 struct intel_crtc_state *crtc_state,
958 int target, int refclk, intel_clock_t *match_clock, 958 int target, int refclk, intel_clock_t *match_clock,
959 intel_clock_t *best_clock) 959 intel_clock_t *best_clock)
960{ 960{
961 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 961 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
962 struct drm_device *dev = crtc->base.dev; 962 struct drm_device *dev = crtc->base.dev;
963 intel_clock_t clock; 963 intel_clock_t clock;
964 unsigned int bestppm = 1000000; 964 unsigned int bestppm = 1000000;
965 /* min update 19.2 MHz */ 965 /* min update 19.2 MHz */
966 int max_n = min(limit->n.max, refclk / 19200); 966 int max_n = min(limit->n.max, refclk / 19200);
967 bool found = false; 967 bool found = false;
968 968
969 target *= 5; /* fast clock */ 969 target *= 5; /* fast clock */
970 970
971 memset(best_clock, 0, sizeof(*best_clock)); 971 memset(best_clock, 0, sizeof(*best_clock));
972 972
973 /* based on hardware requirement, prefer smaller n to precision */ 973 /* based on hardware requirement, prefer smaller n to precision */
974 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) { 974 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
975 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) { 975 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
976 for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow; 976 for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
977 clock.p2 -= clock.p2 > 10 ? 2 : 1) { 977 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
978 clock.p = clock.p1 * clock.p2; 978 clock.p = clock.p1 * clock.p2;
979 /* based on hardware requirement, prefer bigger m1,m2 values */ 979 /* based on hardware requirement, prefer bigger m1,m2 values */
980 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) { 980 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
981 unsigned int ppm; 981 unsigned int ppm;
982 982
983 clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n, 983 clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
984 refclk * clock.m1); 984 refclk * clock.m1);
985 985
986 vlv_calc_dpll_params(refclk, &clock); 986 vlv_calc_dpll_params(refclk, &clock);
987 987
988 if (!intel_PLL_is_valid(dev, limit, 988 if (!intel_PLL_is_valid(dev, limit,
989 &clock)) 989 &clock))
990 continue; 990 continue;
991 991
992 if (!vlv_PLL_is_optimal(dev, target, 992 if (!vlv_PLL_is_optimal(dev, target,
993 &clock, 993 &clock,
994 best_clock, 994 best_clock,
995 bestppm, &ppm)) 995 bestppm, &ppm))
996 continue; 996 continue;
997 997
998 *best_clock = clock; 998 *best_clock = clock;
999 bestppm = ppm; 999 bestppm = ppm;
1000 found = true; 1000 found = true;
1001 } 1001 }
1002 } 1002 }
1003 } 1003 }
1004 } 1004 }
1005 1005
1006 return found; 1006 return found;
1007} 1007}
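
Note that vlv_find_best_dpll() does not iterate over m2 at all: since target ~= refclk * m1 * m2 / (n * p) (with target already scaled by 5 for the fast clock), it solves m2 = round(target * p * n / (refclk * m1)) directly via DIV_ROUND_CLOSEST. A standalone sketch of that step; the macro is re-implemented for positive operands only, and 64-bit intermediates are assumed to avoid overflow:

#include <stdint.h>

/* round-to-nearest division, positive operands only (the kernel's
 * DIV_ROUND_CLOSEST handles more cases; this is just the flavour needed here) */
#define DIV_ROUND_CLOSEST_POS(x, d)	(((x) + (d) / 2) / (d))

/* solve target ~= refclk * m1 * m2 / (n * p) for m2 */
int64_t solve_m2(int64_t target, int64_t p, int64_t n, int64_t refclk, int64_t m1)
{
	return DIV_ROUND_CLOSEST_POS(target * p * n, refclk * m1);
}
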
1008 1008
1009static bool 1009static bool
1010chv_find_best_dpll(const intel_limit_t *limit, 1010chv_find_best_dpll(const intel_limit_t *limit,
1011 struct intel_crtc_state *crtc_state, 1011 struct intel_crtc_state *crtc_state,
1012 int target, int refclk, intel_clock_t *match_clock, 1012 int target, int refclk, intel_clock_t *match_clock,
1013 intel_clock_t *best_clock) 1013 intel_clock_t *best_clock)
1014{ 1014{
1015 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 1015 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1016 struct drm_device *dev = crtc->base.dev; 1016 struct drm_device *dev = crtc->base.dev;
1017 unsigned int best_error_ppm; 1017 unsigned int best_error_ppm;
1018 intel_clock_t clock; 1018 intel_clock_t clock;
1019 uint64_t m2; 1019 uint64_t m2;
1020 int found = false; 1020 int found = false;
1021 1021
1022 memset(best_clock, 0, sizeof(*best_clock)); 1022 memset(best_clock, 0, sizeof(*best_clock));
1023 best_error_ppm = 1000000; 1023 best_error_ppm = 1000000;
1024 1024
1025 /* 1025 /*
1026 * Based on hardware doc, n is always set to 1, and m1 is always 1026 * Based on hardware doc, n is always set to 1, and m1 is always
1027 * set to 2. If we need to support a 200 MHz refclk, we have to 1027 * set to 2. If we need to support a 200 MHz refclk, we have to
1028 * revisit this because n may not be 1 anymore. 1028 * revisit this because n may not be 1 anymore.
1029 */ 1029 */
@@ -13774,2180 +13774,2185 @@ static struct drm_plane *intel_primary_p @@ -13774,2180 +13774,2185 @@ static struct drm_plane *intel_primary_p
13774 primary->check_plane = intel_check_primary_plane; 13774 primary->check_plane = intel_check_primary_plane;
13775 primary->commit_plane = intel_commit_primary_plane; 13775 primary->commit_plane = intel_commit_primary_plane;
13776 primary->disable_plane = intel_disable_primary_plane; 13776 primary->disable_plane = intel_disable_primary_plane;
13777 if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) 13777 if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
13778 primary->plane = !pipe; 13778 primary->plane = !pipe;
13779 13779
13780 if (INTEL_INFO(dev)->gen >= 9) { 13780 if (INTEL_INFO(dev)->gen >= 9) {
13781 intel_primary_formats = skl_primary_formats; 13781 intel_primary_formats = skl_primary_formats;
13782 num_formats = ARRAY_SIZE(skl_primary_formats); 13782 num_formats = ARRAY_SIZE(skl_primary_formats);
13783 } else if (INTEL_INFO(dev)->gen >= 4) { 13783 } else if (INTEL_INFO(dev)->gen >= 4) {
13784 intel_primary_formats = i965_primary_formats; 13784 intel_primary_formats = i965_primary_formats;
13785 num_formats = ARRAY_SIZE(i965_primary_formats); 13785 num_formats = ARRAY_SIZE(i965_primary_formats);
13786 } else { 13786 } else {
13787 intel_primary_formats = i8xx_primary_formats; 13787 intel_primary_formats = i8xx_primary_formats;
13788 num_formats = ARRAY_SIZE(i8xx_primary_formats); 13788 num_formats = ARRAY_SIZE(i8xx_primary_formats);
13789 } 13789 }
13790 13790
13791 drm_universal_plane_init(dev, &primary->base, 0, 13791 drm_universal_plane_init(dev, &primary->base, 0,
13792 &intel_plane_funcs, 13792 &intel_plane_funcs,
13793 intel_primary_formats, num_formats, 13793 intel_primary_formats, num_formats,
13794 DRM_PLANE_TYPE_PRIMARY); 13794 DRM_PLANE_TYPE_PRIMARY);
13795 13795
13796 if (INTEL_INFO(dev)->gen >= 4) 13796 if (INTEL_INFO(dev)->gen >= 4)
13797 intel_create_rotation_property(dev, primary); 13797 intel_create_rotation_property(dev, primary);
13798 13798
13799 drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs); 13799 drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);
13800 13800
13801 return &primary->base; 13801 return &primary->base;
13802} 13802}
13803 13803
13804void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *plane) 13804void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *plane)
13805{ 13805{
13806 if (!dev->mode_config.rotation_property) { 13806 if (!dev->mode_config.rotation_property) {
13807 unsigned long flags = BIT(DRM_ROTATE_0) | 13807 unsigned long flags = BIT(DRM_ROTATE_0) |
13808 BIT(DRM_ROTATE_180); 13808 BIT(DRM_ROTATE_180);
13809 13809
13810 if (INTEL_INFO(dev)->gen >= 9) 13810 if (INTEL_INFO(dev)->gen >= 9)
13811 flags |= BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270); 13811 flags |= BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270);
13812 13812
13813 dev->mode_config.rotation_property = 13813 dev->mode_config.rotation_property =
13814 drm_mode_create_rotation_property(dev, flags); 13814 drm_mode_create_rotation_property(dev, flags);
13815 } 13815 }
13816 if (dev->mode_config.rotation_property) 13816 if (dev->mode_config.rotation_property)
13817 drm_object_attach_property(&plane->base.base, 13817 drm_object_attach_property(&plane->base.base,
13818 dev->mode_config.rotation_property, 13818 dev->mode_config.rotation_property,
13819 plane->base.state->rotation); 13819 plane->base.state->rotation);
13820} 13820}
13821 13821
13822static int 13822static int
13823intel_check_cursor_plane(struct drm_plane *plane, 13823intel_check_cursor_plane(struct drm_plane *plane,
13824 struct intel_crtc_state *crtc_state, 13824 struct intel_crtc_state *crtc_state,
13825 struct intel_plane_state *state) 13825 struct intel_plane_state *state)
13826{ 13826{
13827 struct drm_crtc *crtc = crtc_state->base.crtc; 13827 struct drm_crtc *crtc = crtc_state->base.crtc;
13828 struct drm_framebuffer *fb = state->base.fb; 13828 struct drm_framebuffer *fb = state->base.fb;
13829 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 13829 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
13830 enum i915_pipe pipe = to_intel_plane(plane)->pipe; 13830 enum i915_pipe pipe = to_intel_plane(plane)->pipe;
13831 unsigned stride; 13831 unsigned stride;
13832 int ret; 13832 int ret;
13833 13833
13834 ret = drm_plane_helper_check_update(plane, crtc, fb, &state->src, 13834 ret = drm_plane_helper_check_update(plane, crtc, fb, &state->src,
13835 &state->dst, &state->clip, 13835 &state->dst, &state->clip,
13836 DRM_PLANE_HELPER_NO_SCALING, 13836 DRM_PLANE_HELPER_NO_SCALING,
13837 DRM_PLANE_HELPER_NO_SCALING, 13837 DRM_PLANE_HELPER_NO_SCALING,
13838 true, true, &state->visible); 13838 true, true, &state->visible);
13839 if (ret) 13839 if (ret)
13840 return ret; 13840 return ret;
13841 13841
13842 /* if we want to turn off the cursor ignore width and height */ 13842 /* if we want to turn off the cursor ignore width and height */
13843 if (!obj) 13843 if (!obj)
13844 return 0; 13844 return 0;
13845 13845
13846 /* Check for which cursor types we support */ 13846 /* Check for which cursor types we support */
13847 if (!cursor_size_ok(plane->dev, state->base.crtc_w, state->base.crtc_h)) { 13847 if (!cursor_size_ok(plane->dev, state->base.crtc_w, state->base.crtc_h)) {
13848 DRM_DEBUG("Cursor dimension %dx%d not supported\n", 13848 DRM_DEBUG("Cursor dimension %dx%d not supported\n",
13849 state->base.crtc_w, state->base.crtc_h); 13849 state->base.crtc_w, state->base.crtc_h);
13850 return -EINVAL; 13850 return -EINVAL;
13851 } 13851 }
13852 13852
13853 stride = roundup_pow_of_two(state->base.crtc_w) * 4; 13853 stride = roundup_pow_of_two(state->base.crtc_w) * 4;
13854 if (obj->base.size < stride * state->base.crtc_h) { 13854 if (obj->base.size < stride * state->base.crtc_h) {
13855 DRM_DEBUG_KMS("buffer is too small\n"); 13855 DRM_DEBUG_KMS("buffer is too small\n");
13856 return -ENOMEM; 13856 return -ENOMEM;
13857 } 13857 }
13858 13858
13859 if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) { 13859 if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) {
13860 DRM_DEBUG_KMS("cursor cannot be tiled\n"); 13860 DRM_DEBUG_KMS("cursor cannot be tiled\n");
13861 return -EINVAL; 13861 return -EINVAL;
13862 } 13862 }
13863 13863
13864 /* 13864 /*
13865 * There's something wrong with the cursor on CHV pipe C. 13865 * There's something wrong with the cursor on CHV pipe C.
13866 * If it straddles the left edge of the screen then 13866 * If it straddles the left edge of the screen then
13867 * moving it away from the edge or disabling it often 13867 * moving it away from the edge or disabling it often
13868 * results in a pipe underrun, and often that can lead to 13868 * results in a pipe underrun, and often that can lead to
13869 * dead pipe (constant underrun reported, and it scans 13869 * dead pipe (constant underrun reported, and it scans
13870 * out just a solid color). To recover from that, the 13870 * out just a solid color). To recover from that, the
13871 * display power well must be turned off and on again. 13871 * display power well must be turned off and on again.
13872 * Refuse to put the cursor into that compromised position. 13872 * Refuse to put the cursor into that compromised position.
13873 */ 13873 */
13874 if (IS_CHERRYVIEW(plane->dev) && pipe == PIPE_C && 13874 if (IS_CHERRYVIEW(plane->dev) && pipe == PIPE_C &&
13875 state->visible && state->base.crtc_x < 0) { 13875 state->visible && state->base.crtc_x < 0) {
13876 DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n"); 13876 DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
13877 return -EINVAL; 13877 return -EINVAL;
13878 } 13878 }
13879 13879
13880 return 0; 13880 return 0;
13881} 13881}
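
The buffer-size test in intel_check_cursor_plane() assumes a 4-bytes-per-pixel ARGB cursor whose stride is the cursor width rounded up to the next power of two, so the backing object must hold at least stride * height bytes (a 64x64 cursor therefore needs 64 * 4 * 64 = 16384 bytes). A small standalone sketch of the same computation, with roundup_pow_of_two() open-coded because that helper is kernel-internal:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* smallest power of two >= x, for x >= 1 (open-coded roundup_pow_of_two) */
uint32_t roundup_pow_of_two_u32(uint32_t x)
{
	uint32_t p = 1;

	while (p < x)
		p <<= 1;
	return p;
}

/* mirrors the "buffer is too small" check: power-of-two stride, 4 bytes/pixel */
bool cursor_bo_big_enough(size_t bo_size, uint32_t crtc_w, uint32_t crtc_h)
{
	size_t stride = (size_t)roundup_pow_of_two_u32(crtc_w) * 4;

	return bo_size >= stride * crtc_h;
}
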
13882 13882
13883static void 13883static void
13884intel_disable_cursor_plane(struct drm_plane *plane, 13884intel_disable_cursor_plane(struct drm_plane *plane,
13885 struct drm_crtc *crtc) 13885 struct drm_crtc *crtc)
13886{ 13886{
13887 intel_crtc_update_cursor(crtc, false); 13887 intel_crtc_update_cursor(crtc, false);
13888} 13888}
13889 13889
13890static void 13890static void
13891intel_commit_cursor_plane(struct drm_plane *plane, 13891intel_commit_cursor_plane(struct drm_plane *plane,
13892 struct intel_plane_state *state) 13892 struct intel_plane_state *state)
13893{ 13893{
13894 struct drm_crtc *crtc = state->base.crtc; 13894 struct drm_crtc *crtc = state->base.crtc;
13895 struct drm_device *dev = plane->dev; 13895 struct drm_device *dev = plane->dev;
13896 struct intel_crtc *intel_crtc; 13896 struct intel_crtc *intel_crtc;
13897 struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb); 13897 struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
13898 uint32_t addr; 13898 uint32_t addr;
13899 13899
13900 crtc = crtc ? crtc : plane->crtc; 13900 crtc = crtc ? crtc : plane->crtc;
13901 intel_crtc = to_intel_crtc(crtc); 13901 intel_crtc = to_intel_crtc(crtc);
13902 13902
13903 if (!obj) 13903 if (!obj)
13904 addr = 0; 13904 addr = 0;
13905 else if (!INTEL_INFO(dev)->cursor_needs_physical) 13905 else if (!INTEL_INFO(dev)->cursor_needs_physical)
13906 addr = i915_gem_obj_ggtt_offset(obj); 13906 addr = i915_gem_obj_ggtt_offset(obj);
13907 else 13907 else
13908 addr = obj->phys_handle->busaddr; 13908 addr = obj->phys_handle->busaddr;
13909 13909
13910 intel_crtc->cursor_addr = addr; 13910 intel_crtc->cursor_addr = addr;
13911 13911
13912 if (crtc->state->active) 13912 if (crtc->state->active)
13913 intel_crtc_update_cursor(crtc, state->visible); 13913 intel_crtc_update_cursor(crtc, state->visible);
13914} 13914}
13915 13915
13916static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev, 13916static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
13917 int pipe) 13917 int pipe)
13918{ 13918{
13919 struct intel_plane *cursor; 13919 struct intel_plane *cursor;
13920 struct intel_plane_state *state; 13920 struct intel_plane_state *state;
13921 13921
13922 cursor = kzalloc(sizeof(*cursor), GFP_KERNEL); 13922 cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
13923 if (cursor == NULL) 13923 if (cursor == NULL)
13924 return NULL; 13924 return NULL;
13925 13925
13926 state = intel_create_plane_state(&cursor->base); 13926 state = intel_create_plane_state(&cursor->base);
13927 if (!state) { 13927 if (!state) {
13928 kfree(cursor); 13928 kfree(cursor);
13929 return NULL; 13929 return NULL;
13930 } 13930 }
13931 cursor->base.state = &state->base; 13931 cursor->base.state = &state->base;
13932 13932
13933 cursor->can_scale = false; 13933 cursor->can_scale = false;
13934 cursor->max_downscale = 1; 13934 cursor->max_downscale = 1;
13935 cursor->pipe = pipe; 13935 cursor->pipe = pipe;
13936 cursor->plane = pipe; 13936 cursor->plane = pipe;
13937 cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe); 13937 cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe);
13938 cursor->check_plane = intel_check_cursor_plane; 13938 cursor->check_plane = intel_check_cursor_plane;
13939 cursor->commit_plane = intel_commit_cursor_plane; 13939 cursor->commit_plane = intel_commit_cursor_plane;
13940 cursor->disable_plane = intel_disable_cursor_plane; 13940 cursor->disable_plane = intel_disable_cursor_plane;
13941 13941
13942 drm_universal_plane_init(dev, &cursor->base, 0, 13942 drm_universal_plane_init(dev, &cursor->base, 0,
13943 &intel_plane_funcs, 13943 &intel_plane_funcs,
13944 intel_cursor_formats, 13944 intel_cursor_formats,
13945 ARRAY_SIZE(intel_cursor_formats), 13945 ARRAY_SIZE(intel_cursor_formats),
13946 DRM_PLANE_TYPE_CURSOR); 13946 DRM_PLANE_TYPE_CURSOR);
13947 13947
13948 if (INTEL_INFO(dev)->gen >= 4) { 13948 if (INTEL_INFO(dev)->gen >= 4) {
13949 if (!dev->mode_config.rotation_property) 13949 if (!dev->mode_config.rotation_property)
13950 dev->mode_config.rotation_property = 13950 dev->mode_config.rotation_property =
13951 drm_mode_create_rotation_property(dev, 13951 drm_mode_create_rotation_property(dev,
13952 BIT(DRM_ROTATE_0) | 13952 BIT(DRM_ROTATE_0) |
13953 BIT(DRM_ROTATE_180)); 13953 BIT(DRM_ROTATE_180));
13954 if (dev->mode_config.rotation_property) 13954 if (dev->mode_config.rotation_property)
13955 drm_object_attach_property(&cursor->base.base, 13955 drm_object_attach_property(&cursor->base.base,
13956 dev->mode_config.rotation_property, 13956 dev->mode_config.rotation_property,
13957 state->base.rotation); 13957 state->base.rotation);
13958 } 13958 }
13959 13959
13960 if (INTEL_INFO(dev)->gen >= 9) 13960 if (INTEL_INFO(dev)->gen >= 9)
13961 state->scaler_id = -1; 13961 state->scaler_id = -1;
13962 13962
13963 drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs); 13963 drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
13964 13964
13965 return &cursor->base; 13965 return &cursor->base;
13966} 13966}
13967 13967
13968static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc, 13968static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
13969 struct intel_crtc_state *crtc_state) 13969 struct intel_crtc_state *crtc_state)
13970{ 13970{
13971 int i; 13971 int i;
13972 struct intel_scaler *intel_scaler; 13972 struct intel_scaler *intel_scaler;
13973 struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state; 13973 struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
13974 13974
13975 for (i = 0; i < intel_crtc->num_scalers; i++) { 13975 for (i = 0; i < intel_crtc->num_scalers; i++) {
13976 intel_scaler = &scaler_state->scalers[i]; 13976 intel_scaler = &scaler_state->scalers[i];
13977 intel_scaler->in_use = 0; 13977 intel_scaler->in_use = 0;
13978 intel_scaler->mode = PS_SCALER_MODE_DYN; 13978 intel_scaler->mode = PS_SCALER_MODE_DYN;
13979 } 13979 }
13980 13980
13981 scaler_state->scaler_id = -1; 13981 scaler_state->scaler_id = -1;
13982} 13982}
13983 13983
13984static void intel_crtc_init(struct drm_device *dev, int pipe) 13984static void intel_crtc_init(struct drm_device *dev, int pipe)
13985{ 13985{
13986 struct drm_i915_private *dev_priv = dev->dev_private; 13986 struct drm_i915_private *dev_priv = dev->dev_private;
13987 struct intel_crtc *intel_crtc; 13987 struct intel_crtc *intel_crtc;
13988 struct intel_crtc_state *crtc_state = NULL; 13988 struct intel_crtc_state *crtc_state = NULL;
13989 struct drm_plane *primary = NULL; 13989 struct drm_plane *primary = NULL;
13990 struct drm_plane *cursor = NULL; 13990 struct drm_plane *cursor = NULL;
13991 int i, ret; 13991 int i, ret;
13992 13992
13993 intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL); 13993 intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
13994 if (intel_crtc == NULL) 13994 if (intel_crtc == NULL)
13995 return; 13995 return;
13996 13996
13997 crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL); 13997 crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
13998 if (!crtc_state) 13998 if (!crtc_state)
13999 goto fail; 13999 goto fail;
14000 intel_crtc->config = crtc_state; 14000 intel_crtc->config = crtc_state;
14001 intel_crtc->base.state = &crtc_state->base; 14001 intel_crtc->base.state = &crtc_state->base;
14002 crtc_state->base.crtc = &intel_crtc->base; 14002 crtc_state->base.crtc = &intel_crtc->base;
14003 14003
14004 /* initialize shared scalers */ 14004 /* initialize shared scalers */
14005 if (INTEL_INFO(dev)->gen >= 9) { 14005 if (INTEL_INFO(dev)->gen >= 9) {
14006 if (pipe == PIPE_C) 14006 if (pipe == PIPE_C)
14007 intel_crtc->num_scalers = 1; 14007 intel_crtc->num_scalers = 1;
14008 else 14008 else
14009 intel_crtc->num_scalers = SKL_NUM_SCALERS; 14009 intel_crtc->num_scalers = SKL_NUM_SCALERS;
14010 14010
14011 skl_init_scalers(dev, intel_crtc, crtc_state); 14011 skl_init_scalers(dev, intel_crtc, crtc_state);
14012 } 14012 }
14013 14013
14014 primary = intel_primary_plane_create(dev, pipe); 14014 primary = intel_primary_plane_create(dev, pipe);
14015 if (!primary) 14015 if (!primary)
14016 goto fail; 14016 goto fail;
14017 14017
14018 cursor = intel_cursor_plane_create(dev, pipe); 14018 cursor = intel_cursor_plane_create(dev, pipe);
14019 if (!cursor) 14019 if (!cursor)
14020 goto fail; 14020 goto fail;
14021 14021
14022 ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary, 14022 ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
14023 cursor, &intel_crtc_funcs); 14023 cursor, &intel_crtc_funcs);
14024 if (ret) 14024 if (ret)
14025 goto fail; 14025 goto fail;
14026 14026
14027 drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256); 14027 drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
14028 for (i = 0; i < 256; i++) { 14028 for (i = 0; i < 256; i++) {
14029 intel_crtc->lut_r[i] = i; 14029 intel_crtc->lut_r[i] = i;
14030 intel_crtc->lut_g[i] = i; 14030 intel_crtc->lut_g[i] = i;
14031 intel_crtc->lut_b[i] = i; 14031 intel_crtc->lut_b[i] = i;
14032 } 14032 }
14033 14033
14034 /* 14034 /*
14035 * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port 14035 * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
14036 * are hooked to pipe B. Hence we want plane A feeding pipe B. 14036 * are hooked to pipe B. Hence we want plane A feeding pipe B.
14037 */ 14037 */
14038 intel_crtc->pipe = pipe; 14038 intel_crtc->pipe = pipe;
14039 intel_crtc->plane = pipe; 14039 intel_crtc->plane = pipe;
14040 if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) { 14040 if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
14041 DRM_DEBUG_KMS("swapping pipes & planes for FBC\n"); 14041 DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
14042 intel_crtc->plane = !pipe; 14042 intel_crtc->plane = !pipe;
14043 } 14043 }
14044 14044
14045 intel_crtc->cursor_base = ~0; 14045 intel_crtc->cursor_base = ~0;
14046 intel_crtc->cursor_cntl = ~0; 14046 intel_crtc->cursor_cntl = ~0;
14047 intel_crtc->cursor_size = ~0; 14047 intel_crtc->cursor_size = ~0;
14048 14048
14049 intel_crtc->wm.cxsr_allowed = true; 14049 intel_crtc->wm.cxsr_allowed = true;
14050 14050
14051 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) || 14051 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
14052 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL); 14052 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
14053 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base; 14053 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
14054 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base; 14054 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
14055 14055
14056 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); 14056 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
14057 14057
14058 WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe); 14058 WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
14059 return; 14059 return;
14060 14060
14061fail: 14061fail:
14062 if (primary) 14062 if (primary)
14063 drm_plane_cleanup(primary); 14063 drm_plane_cleanup(primary);
14064 if (cursor) 14064 if (cursor)
14065 drm_plane_cleanup(cursor); 14065 drm_plane_cleanup(cursor);
14066 kfree(crtc_state); 14066 kfree(crtc_state);
14067 kfree(intel_crtc); 14067 kfree(intel_crtc);
14068} 14068}
14069 14069
14070enum i915_pipe intel_get_pipe_from_connector(struct intel_connector *connector) 14070enum i915_pipe intel_get_pipe_from_connector(struct intel_connector *connector)
14071{ 14071{
14072 struct drm_encoder *encoder = connector->base.encoder; 14072 struct drm_encoder *encoder = connector->base.encoder;
14073 struct drm_device *dev = connector->base.dev; 14073 struct drm_device *dev = connector->base.dev;
14074 14074
14075 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); 14075 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
14076 14076
14077 if (!encoder || WARN_ON(!encoder->crtc)) 14077 if (!encoder || WARN_ON(!encoder->crtc))
14078 return INVALID_PIPE; 14078 return INVALID_PIPE;
14079 14079
14080 return to_intel_crtc(encoder->crtc)->pipe; 14080 return to_intel_crtc(encoder->crtc)->pipe;
14081} 14081}
14082 14082
14083int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 14083int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
14084 struct drm_file *file) 14084 struct drm_file *file)
14085{ 14085{
14086 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; 14086 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
14087 struct drm_crtc *drmmode_crtc; 14087 struct drm_crtc *drmmode_crtc;
14088 struct intel_crtc *crtc; 14088 struct intel_crtc *crtc;
14089 14089
14090 drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id); 14090 drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
14091 14091
14092 if (!drmmode_crtc) { 14092 if (!drmmode_crtc) {
14093 DRM_ERROR("no such CRTC id\n"); 14093 DRM_ERROR("no such CRTC id\n");
14094 return -ENOENT; 14094 return -ENOENT;
14095 } 14095 }
14096 14096
14097 crtc = to_intel_crtc(drmmode_crtc); 14097 crtc = to_intel_crtc(drmmode_crtc);
14098 pipe_from_crtc_id->pipe = crtc->pipe; 14098 pipe_from_crtc_id->pipe = crtc->pipe;
14099 14099
14100 return 0; 14100 return 0;
14101} 14101}
14102 14102
14103static int intel_encoder_clones(struct intel_encoder *encoder) 14103static int intel_encoder_clones(struct intel_encoder *encoder)
14104{ 14104{
14105 struct drm_device *dev = encoder->base.dev; 14105 struct drm_device *dev = encoder->base.dev;
14106 struct intel_encoder *source_encoder; 14106 struct intel_encoder *source_encoder;
14107 int index_mask = 0; 14107 int index_mask = 0;
14108 int entry = 0; 14108 int entry = 0;
14109 14109
14110 for_each_intel_encoder(dev, source_encoder) { 14110 for_each_intel_encoder(dev, source_encoder) {
14111 if (encoders_cloneable(encoder, source_encoder)) 14111 if (encoders_cloneable(encoder, source_encoder))
14112 index_mask |= (1 << entry); 14112 index_mask |= (1 << entry);
14113 14113
14114 entry++; 14114 entry++;
14115 } 14115 }
14116 14116
14117 return index_mask; 14117 return index_mask;
14118} 14118}
14119 14119
14120static bool has_edp_a(struct drm_device *dev) 14120static bool has_edp_a(struct drm_device *dev)
14121{ 14121{
14122 struct drm_i915_private *dev_priv = dev->dev_private; 14122 struct drm_i915_private *dev_priv = dev->dev_private;
14123 14123
14124 if (!IS_MOBILE(dev)) 14124 if (!IS_MOBILE(dev))
14125 return false; 14125 return false;
14126 14126
14127 if ((I915_READ(DP_A) & DP_DETECTED) == 0) 14127 if ((I915_READ(DP_A) & DP_DETECTED) == 0)
14128 return false; 14128 return false;
14129 14129
14130 if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE)) 14130 if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
14131 return false; 14131 return false;
14132 14132
14133 return true; 14133 return true;
14134} 14134}
14135 14135
14136static bool intel_crt_present(struct drm_device *dev) 14136static bool intel_crt_present(struct drm_device *dev)
14137{ 14137{
14138 struct drm_i915_private *dev_priv = dev->dev_private; 14138 struct drm_i915_private *dev_priv = dev->dev_private;
14139 14139
14140 if (INTEL_INFO(dev)->gen >= 9) 14140 if (INTEL_INFO(dev)->gen >= 9)
14141 return false; 14141 return false;
14142 14142
14143 if (IS_HSW_ULT(dev) || IS_BDW_ULT(dev)) 14143 if (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
14144 return false; 14144 return false;
14145 14145
14146 if (IS_CHERRYVIEW(dev)) 14146 if (IS_CHERRYVIEW(dev))
14147 return false; 14147 return false;
14148 14148
14149 if (IS_VALLEYVIEW(dev) && !dev_priv->vbt.int_crt_support) 14149 if (IS_VALLEYVIEW(dev) && !dev_priv->vbt.int_crt_support)
14150 return false; 14150 return false;
14151 14151
14152 return true; 14152 return true;
14153} 14153}
14154 14154
14155static void intel_setup_outputs(struct drm_device *dev) 14155static void intel_setup_outputs(struct drm_device *dev)
14156{ 14156{
14157 struct drm_i915_private *dev_priv = dev->dev_private; 14157 struct drm_i915_private *dev_priv = dev->dev_private;
14158 struct intel_encoder *encoder; 14158 struct intel_encoder *encoder;
14159 bool dpd_is_edp = false; 14159 bool dpd_is_edp = false;
14160 14160
14161 intel_lvds_init(dev); 14161 intel_lvds_init(dev);
14162 14162
14163 if (intel_crt_present(dev)) 14163 if (intel_crt_present(dev))
14164 intel_crt_init(dev); 14164 intel_crt_init(dev);
14165 14165
14166 if (IS_BROXTON(dev)) { 14166 if (IS_BROXTON(dev)) {
14167 /* 14167 /*
14168 * FIXME: Broxton doesn't support port detection via the 14168 * FIXME: Broxton doesn't support port detection via the
14169 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to 14169 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
14170 * detect the ports. 14170 * detect the ports.
14171 */ 14171 */
14172 intel_ddi_init(dev, PORT_A); 14172 intel_ddi_init(dev, PORT_A);
14173 intel_ddi_init(dev, PORT_B); 14173 intel_ddi_init(dev, PORT_B);
14174 intel_ddi_init(dev, PORT_C); 14174 intel_ddi_init(dev, PORT_C);
14175 } else if (HAS_DDI(dev)) { 14175 } else if (HAS_DDI(dev)) {
14176 int found; 14176 int found;
14177 14177
14178 /* 14178 /*
14179 * Haswell uses DDI functions to detect digital outputs. 14179 * Haswell uses DDI functions to detect digital outputs.
14180 * On SKL pre-D0 the strap isn't connected, so we assume 14180 * On SKL pre-D0 the strap isn't connected, so we assume
14181 * it's there. 14181 * it's there.
14182 */ 14182 */
14183 found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED; 14183 found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
14184 /* WaIgnoreDDIAStrap: skl */ 14184 /* WaIgnoreDDIAStrap: skl */
14185 if (found || IS_SKYLAKE(dev)) 14185 if (found || IS_SKYLAKE(dev))
14186 intel_ddi_init(dev, PORT_A); 14186 intel_ddi_init(dev, PORT_A);
14187 14187
14188 /* DDI B, C and D detection is indicated by the SFUSE_STRAP 14188 /* DDI B, C and D detection is indicated by the SFUSE_STRAP
14189 * register */ 14189 * register */
14190 found = I915_READ(SFUSE_STRAP); 14190 found = I915_READ(SFUSE_STRAP);
14191 14191
14192 if (found & SFUSE_STRAP_DDIB_DETECTED) 14192 if (found & SFUSE_STRAP_DDIB_DETECTED)
14193 intel_ddi_init(dev, PORT_B); 14193 intel_ddi_init(dev, PORT_B);
14194 if (found & SFUSE_STRAP_DDIC_DETECTED) 14194 if (found & SFUSE_STRAP_DDIC_DETECTED)
14195 intel_ddi_init(dev, PORT_C); 14195 intel_ddi_init(dev, PORT_C);
14196 if (found & SFUSE_STRAP_DDID_DETECTED) 14196 if (found & SFUSE_STRAP_DDID_DETECTED)
14197 intel_ddi_init(dev, PORT_D); 14197 intel_ddi_init(dev, PORT_D);
14198 /* 14198 /*
14199 * On SKL we don't have a way to detect DDI-E so we rely on VBT. 14199 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
14200 */ 14200 */
14201 if (IS_SKYLAKE(dev) && 14201 if (IS_SKYLAKE(dev) &&
14202 (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp || 14202 (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
14203 dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi || 14203 dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
14204 dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi)) 14204 dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
14205 intel_ddi_init(dev, PORT_E); 14205 intel_ddi_init(dev, PORT_E);
14206 14206
14207 } else if (HAS_PCH_SPLIT(dev)) { 14207 } else if (HAS_PCH_SPLIT(dev)) {
14208 int found; 14208 int found;
14209 dpd_is_edp = intel_dp_is_edp(dev, PORT_D); 14209 dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
14210 14210
14211 if (has_edp_a(dev)) 14211 if (has_edp_a(dev))
14212 intel_dp_init(dev, DP_A, PORT_A); 14212 intel_dp_init(dev, DP_A, PORT_A);
14213 14213
14214 if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) { 14214 if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
14215 /* PCH SDVOB multiplex with HDMIB */ 14215 /* PCH SDVOB multiplex with HDMIB */
14216 found = intel_sdvo_init(dev, PCH_SDVOB, true); 14216 found = intel_sdvo_init(dev, PCH_SDVOB, true);
14217 if (!found) 14217 if (!found)
14218 intel_hdmi_init(dev, PCH_HDMIB, PORT_B); 14218 intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
14219 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) 14219 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
14220 intel_dp_init(dev, PCH_DP_B, PORT_B); 14220 intel_dp_init(dev, PCH_DP_B, PORT_B);
14221 } 14221 }
14222 14222
14223 if (I915_READ(PCH_HDMIC) & SDVO_DETECTED) 14223 if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
14224 intel_hdmi_init(dev, PCH_HDMIC, PORT_C); 14224 intel_hdmi_init(dev, PCH_HDMIC, PORT_C);
14225 14225
14226 if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED) 14226 if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
14227 intel_hdmi_init(dev, PCH_HDMID, PORT_D); 14227 intel_hdmi_init(dev, PCH_HDMID, PORT_D);
14228 14228
14229 if (I915_READ(PCH_DP_C) & DP_DETECTED) 14229 if (I915_READ(PCH_DP_C) & DP_DETECTED)
14230 intel_dp_init(dev, PCH_DP_C, PORT_C); 14230 intel_dp_init(dev, PCH_DP_C, PORT_C);
14231 14231
14232 if (I915_READ(PCH_DP_D) & DP_DETECTED) 14232 if (I915_READ(PCH_DP_D) & DP_DETECTED)
14233 intel_dp_init(dev, PCH_DP_D, PORT_D); 14233 intel_dp_init(dev, PCH_DP_D, PORT_D);
14234 } else if (IS_VALLEYVIEW(dev)) { 14234 } else if (IS_VALLEYVIEW(dev)) {
14235 bool has_edp, has_port; 14235 bool has_edp, has_port;
14236 14236
14237 /* 14237 /*
14238 * The DP_DETECTED bit is the latched state of the DDC 14238 * The DP_DETECTED bit is the latched state of the DDC
14239 * SDA pin at boot. However since eDP doesn't require DDC 14239 * SDA pin at boot. However since eDP doesn't require DDC
14240 * (no way to plug in a DP->HDMI dongle) the DDC pins for 14240 * (no way to plug in a DP->HDMI dongle) the DDC pins for
14241 * eDP ports may have been muxed to an alternate function. 14241 * eDP ports may have been muxed to an alternate function.
14242 * Thus we can't rely on the DP_DETECTED bit alone to detect 14242 * Thus we can't rely on the DP_DETECTED bit alone to detect
14243 * eDP ports. Consult the VBT as well as DP_DETECTED to 14243 * eDP ports. Consult the VBT as well as DP_DETECTED to
14244 * detect eDP ports. 14244 * detect eDP ports.
14245 * 14245 *
14246 * Sadly the straps seem to be missing sometimes even for HDMI 14246 * Sadly the straps seem to be missing sometimes even for HDMI
14247 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap 14247 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
14248 * and VBT for the presence of the port. Additionally we can't 14248 * and VBT for the presence of the port. Additionally we can't
14249 * trust the port type the VBT declares as we've seen at least 14249 * trust the port type the VBT declares as we've seen at least
14250 * HDMI ports that the VBT claims are DP or eDP. 14250 * HDMI ports that the VBT claims are DP or eDP.
14251 */ 14251 */
14252 has_edp = intel_dp_is_edp(dev, PORT_B); 14252 has_edp = intel_dp_is_edp(dev, PORT_B);
14253 has_port = intel_bios_is_port_present(dev_priv, PORT_B); 14253 has_port = intel_bios_is_port_present(dev_priv, PORT_B);
14254 if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port) 14254 if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
14255 has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B); 14255 has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B);
14256 if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp) 14256 if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
14257 intel_hdmi_init(dev, VLV_HDMIB, PORT_B); 14257 intel_hdmi_init(dev, VLV_HDMIB, PORT_B);
14258 14258
14259 has_edp = intel_dp_is_edp(dev, PORT_C); 14259 has_edp = intel_dp_is_edp(dev, PORT_C);
14260 has_port = intel_bios_is_port_present(dev_priv, PORT_C); 14260 has_port = intel_bios_is_port_present(dev_priv, PORT_C);
14261 if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port) 14261 if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
14262 has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C); 14262 has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C);
14263 if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp) 14263 if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
14264 intel_hdmi_init(dev, VLV_HDMIC, PORT_C); 14264 intel_hdmi_init(dev, VLV_HDMIC, PORT_C);
14265 14265
14266 if (IS_CHERRYVIEW(dev)) { 14266 if (IS_CHERRYVIEW(dev)) {
14267 /* 14267 /*
14268 * eDP not supported on port D, 14268 * eDP not supported on port D,
14269 * so no need to worry about it 14269 * so no need to worry about it
14270 */ 14270 */
14271 has_port = intel_bios_is_port_present(dev_priv, PORT_D); 14271 has_port = intel_bios_is_port_present(dev_priv, PORT_D);
14272 if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port) 14272 if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
14273 intel_dp_init(dev, CHV_DP_D, PORT_D); 14273 intel_dp_init(dev, CHV_DP_D, PORT_D);
14274 if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port) 14274 if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
14275 intel_hdmi_init(dev, CHV_HDMID, PORT_D); 14275 intel_hdmi_init(dev, CHV_HDMID, PORT_D);
14276 } 14276 }
14277 14277
14278 intel_dsi_init(dev); 14278 intel_dsi_init(dev);
14279 } else if (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) { 14279 } else if (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) {
14280 bool found = false; 14280 bool found = false;
14281 14281
14282 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { 14282 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
14283 DRM_DEBUG_KMS("probing SDVOB\n"); 14283 DRM_DEBUG_KMS("probing SDVOB\n");
14284 found = intel_sdvo_init(dev, GEN3_SDVOB, true); 14284 found = intel_sdvo_init(dev, GEN3_SDVOB, true);
14285 if (!found && IS_G4X(dev)) { 14285 if (!found && IS_G4X(dev)) {
14286 DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); 14286 DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
14287 intel_hdmi_init(dev, GEN4_HDMIB, PORT_B); 14287 intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
14288 } 14288 }
14289 14289
14290 if (!found && IS_G4X(dev)) 14290 if (!found && IS_G4X(dev))
14291 intel_dp_init(dev, DP_B, PORT_B); 14291 intel_dp_init(dev, DP_B, PORT_B);
14292 } 14292 }
14293 14293
14294 /* Before G4X, SDVOC doesn't have its own detect register */ 14294 /* Before G4X, SDVOC doesn't have its own detect register */
14295 14295
14296 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { 14296 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
14297 DRM_DEBUG_KMS("probing SDVOC\n"); 14297 DRM_DEBUG_KMS("probing SDVOC\n");
14298 found = intel_sdvo_init(dev, GEN3_SDVOC, false); 14298 found = intel_sdvo_init(dev, GEN3_SDVOC, false);
14299 } 14299 }
14300 14300
14301 if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) { 14301 if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
14302 14302
14303 if (IS_G4X(dev)) { 14303 if (IS_G4X(dev)) {
14304 DRM_DEBUG_KMS("probing HDMI on SDVOC\n"); 14304 DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
14305 intel_hdmi_init(dev, GEN4_HDMIC, PORT_C); 14305 intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
14306 } 14306 }
14307 if (IS_G4X(dev)) 14307 if (IS_G4X(dev))
14308 intel_dp_init(dev, DP_C, PORT_C); 14308 intel_dp_init(dev, DP_C, PORT_C);
14309 } 14309 }
14310 14310
14311 if (IS_G4X(dev) && 14311 if (IS_G4X(dev) &&
14312 (I915_READ(DP_D) & DP_DETECTED)) 14312 (I915_READ(DP_D) & DP_DETECTED))
14313 intel_dp_init(dev, DP_D, PORT_D); 14313 intel_dp_init(dev, DP_D, PORT_D);
14314 } else if (IS_GEN2(dev)) 14314 } else if (IS_GEN2(dev))
14315 intel_dvo_init(dev); 14315 intel_dvo_init(dev);
14316 14316
14317 if (SUPPORTS_TV(dev)) 14317 if (SUPPORTS_TV(dev))
14318 intel_tv_init(dev); 14318 intel_tv_init(dev);
14319 14319
14320 intel_psr_init(dev); 14320 intel_psr_init(dev);
14321 14321
14322 for_each_intel_encoder(dev, encoder) { 14322 for_each_intel_encoder(dev, encoder) {
14323 encoder->base.possible_crtcs = encoder->crtc_mask; 14323 encoder->base.possible_crtcs = encoder->crtc_mask;
14324 encoder->base.possible_clones = 14324 encoder->base.possible_clones =
14325 intel_encoder_clones(encoder); 14325 intel_encoder_clones(encoder);
14326 } 14326 }
14327 14327
14328 intel_init_pch_refclk(dev); 14328 intel_init_pch_refclk(dev);
14329 14329
14330 drm_helper_move_panel_connectors_to_head(dev); 14330 drm_helper_move_panel_connectors_to_head(dev);
14331} 14331}
14332 14332
14333static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) 14333static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
14334{ 14334{
14335 struct drm_device *dev = fb->dev; 14335 struct drm_device *dev = fb->dev;
14336 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 14336 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14337 14337
14338 drm_framebuffer_cleanup(fb); 14338 drm_framebuffer_cleanup(fb);
14339 mutex_lock(&dev->struct_mutex); 14339 mutex_lock(&dev->struct_mutex);
14340 WARN_ON(!intel_fb->obj->framebuffer_references--); 14340 WARN_ON(!intel_fb->obj->framebuffer_references--);
14341 drm_gem_object_unreference(&intel_fb->obj->base); 14341 drm_gem_object_unreference(&intel_fb->obj->base);
14342 mutex_unlock(&dev->struct_mutex); 14342 mutex_unlock(&dev->struct_mutex);
14343 kfree(intel_fb); 14343 kfree(intel_fb);
14344} 14344}
14345 14345
14346static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, 14346static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
14347 struct drm_file *file, 14347 struct drm_file *file,
14348 unsigned int *handle) 14348 unsigned int *handle)
14349{ 14349{
14350 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 14350 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14351 struct drm_i915_gem_object *obj = intel_fb->obj; 14351 struct drm_i915_gem_object *obj = intel_fb->obj;
14352 14352
14353 if (obj->userptr.mm) { 14353 if (obj->userptr.mm) {
14354 DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n"); 14354 DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
14355 return -EINVAL; 14355 return -EINVAL;
14356 } 14356 }
14357 14357
14358 return drm_gem_handle_create(file, &obj->base, handle); 14358 return drm_gem_handle_create(file, &obj->base, handle);
14359} 14359}
14360 14360
14361static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb, 14361static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
14362 struct drm_file *file, 14362 struct drm_file *file,
14363 unsigned flags, unsigned color, 14363 unsigned flags, unsigned color,
14364 struct drm_clip_rect *clips, 14364 struct drm_clip_rect *clips,
14365 unsigned num_clips) 14365 unsigned num_clips)
14366{ 14366{
14367 struct drm_device *dev = fb->dev; 14367 struct drm_device *dev = fb->dev;
14368 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 14368 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14369 struct drm_i915_gem_object *obj = intel_fb->obj; 14369 struct drm_i915_gem_object *obj = intel_fb->obj;
14370 14370
14371 mutex_lock(&dev->struct_mutex); 14371 mutex_lock(&dev->struct_mutex);
14372 intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB); 14372 intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
14373 mutex_unlock(&dev->struct_mutex); 14373 mutex_unlock(&dev->struct_mutex);
14374 14374
14375 return 0; 14375 return 0;
14376} 14376}
14377 14377
14378static const struct drm_framebuffer_funcs intel_fb_funcs = { 14378static const struct drm_framebuffer_funcs intel_fb_funcs = {
14379 .destroy = intel_user_framebuffer_destroy, 14379 .destroy = intel_user_framebuffer_destroy,
14380 .create_handle = intel_user_framebuffer_create_handle, 14380 .create_handle = intel_user_framebuffer_create_handle,
14381 .dirty = intel_user_framebuffer_dirty, 14381 .dirty = intel_user_framebuffer_dirty,
14382}; 14382};
14383 14383
14384static 14384static
14385u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier, 14385u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier,
14386 uint32_t pixel_format) 14386 uint32_t pixel_format)
14387{ 14387{
14388 u32 gen = INTEL_INFO(dev)->gen; 14388 u32 gen = INTEL_INFO(dev)->gen;
14389 14389
14390 if (gen >= 9) { 14390 if (gen >= 9) {
14391 /* "The stride in bytes must not exceed the of the size of 8K 14391 /* "The stride in bytes must not exceed the of the size of 8K
14392 * pixels and 32K bytes." 14392 * pixels and 32K bytes."
14393 */ 14393 */
14394 return min(8192*drm_format_plane_cpp(pixel_format, 0), 32768); 14394 return min(8192*drm_format_plane_cpp(pixel_format, 0), 32768);
14395 } else if (gen >= 5 && !IS_VALLEYVIEW(dev)) { 14395 } else if (gen >= 5 && !IS_VALLEYVIEW(dev)) {
14396 return 32*1024; 14396 return 32*1024;
14397 } else if (gen >= 4) { 14397 } else if (gen >= 4) {
14398 if (fb_modifier == I915_FORMAT_MOD_X_TILED) 14398 if (fb_modifier == I915_FORMAT_MOD_X_TILED)
14399 return 16*1024; 14399 return 16*1024;
14400 else 14400 else
14401 return 32*1024; 14401 return 32*1024;
14402 } else if (gen >= 3) { 14402 } else if (gen >= 3) {
14403 if (fb_modifier == I915_FORMAT_MOD_X_TILED) 14403 if (fb_modifier == I915_FORMAT_MOD_X_TILED)
14404 return 8*1024; 14404 return 8*1024;
14405 else 14405 else
14406 return 16*1024; 14406 return 16*1024;
14407 } else { 14407 } else {
14408 /* XXX DSPC is limited to 4k tiled */ 14408 /* XXX DSPC is limited to 4k tiled */
14409 return 8*1024; 14409 return 8*1024;
14410 } 14410 }
14411} 14411}
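
For gen9+ the pitch limit above is the smaller of 8K pixels, converted to bytes with the format's bytes-per-pixel, and a flat 32K bytes: with 4-byte XRGB8888 both terms are 32768, while 2-byte RGB565 is capped by the pixel term at 16384 bytes. A one-function sketch of that rule, taking the cpp value as a plain parameter instead of calling drm_format_plane_cpp():

/* gen9+ rule from intel_fb_pitch_limit(): min(8192 pixels * cpp, 32768 bytes) */
unsigned int gen9_pitch_limit(unsigned int cpp)
{
	unsigned int by_pixels = 8192u * cpp;

	return by_pixels < 32768u ? by_pixels : 32768u;
}
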
14412 14412
14413static int intel_framebuffer_init(struct drm_device *dev, 14413static int intel_framebuffer_init(struct drm_device *dev,
14414 struct intel_framebuffer *intel_fb, 14414 struct intel_framebuffer *intel_fb,
14415 struct drm_mode_fb_cmd2 *mode_cmd, 14415 struct drm_mode_fb_cmd2 *mode_cmd,
14416 struct drm_i915_gem_object *obj) 14416 struct drm_i915_gem_object *obj)
14417{ 14417{
14418 unsigned int aligned_height; 14418 unsigned int aligned_height;
14419 int ret; 14419 int ret;
14420 u32 pitch_limit, stride_alignment; 14420 u32 pitch_limit, stride_alignment;
14421 14421
14422 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 14422 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
14423 14423
14424 if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) { 14424 if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
14425 /* Enforce that fb modifier and tiling mode match, but only for 14425 /* Enforce that fb modifier and tiling mode match, but only for
14426 * X-tiled. This is needed for FBC. */ 14426 * X-tiled. This is needed for FBC. */
14427 if (!!(obj->tiling_mode == I915_TILING_X) != 14427 if (!!(obj->tiling_mode == I915_TILING_X) !=
14428 !!(mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED)) { 14428 !!(mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED)) {
14429 DRM_DEBUG("tiling_mode doesn't match fb modifier\n"); 14429 DRM_DEBUG("tiling_mode doesn't match fb modifier\n");
14430 return -EINVAL; 14430 return -EINVAL;
14431 } 14431 }
14432 } else { 14432 } else {
14433 if (obj->tiling_mode == I915_TILING_X) 14433 if (obj->tiling_mode == I915_TILING_X)
14434 mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED; 14434 mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
14435 else if (obj->tiling_mode == I915_TILING_Y) { 14435 else if (obj->tiling_mode == I915_TILING_Y) {
14436 DRM_DEBUG("No Y tiling for legacy addfb\n"); 14436 DRM_DEBUG("No Y tiling for legacy addfb\n");
14437 return -EINVAL; 14437 return -EINVAL;
14438 } 14438 }
14439 } 14439 }
14440 14440
14441 /* Passed in modifier sanity checking. */ 14441 /* Passed in modifier sanity checking. */
14442 switch (mode_cmd->modifier[0]) { 14442 switch (mode_cmd->modifier[0]) {
14443 case I915_FORMAT_MOD_Y_TILED: 14443 case I915_FORMAT_MOD_Y_TILED:
14444 case I915_FORMAT_MOD_Yf_TILED: 14444 case I915_FORMAT_MOD_Yf_TILED:
14445 if (INTEL_INFO(dev)->gen < 9) { 14445 if (INTEL_INFO(dev)->gen < 9) {
14446 DRM_DEBUG("Unsupported tiling 0x%llx!\n", 14446 DRM_DEBUG("Unsupported tiling 0x%llx!\n",
14447 mode_cmd->modifier[0]); 14447 mode_cmd->modifier[0]);
14448 return -EINVAL; 14448 return -EINVAL;
14449 } 14449 }
14450 case DRM_FORMAT_MOD_NONE: 14450 case DRM_FORMAT_MOD_NONE:
14451 case I915_FORMAT_MOD_X_TILED: 14451 case I915_FORMAT_MOD_X_TILED:
14452 break; 14452 break;
14453 default: 14453 default:
14454 DRM_DEBUG("Unsupported fb modifier 0x%llx!\n", 14454 DRM_DEBUG("Unsupported fb modifier 0x%llx!\n",
14455 mode_cmd->modifier[0]); 14455 mode_cmd->modifier[0]);
14456 return -EINVAL; 14456 return -EINVAL;
14457 } 14457 }
14458 14458
14459 stride_alignment = intel_fb_stride_alignment(dev, mode_cmd->modifier[0], 14459 stride_alignment = intel_fb_stride_alignment(dev, mode_cmd->modifier[0],
14460 mode_cmd->pixel_format); 14460 mode_cmd->pixel_format);
14461 if (mode_cmd->pitches[0] & (stride_alignment - 1)) { 14461 if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
14462 DRM_DEBUG("pitch (%d) must be at least %u byte aligned\n", 14462 DRM_DEBUG("pitch (%d) must be at least %u byte aligned\n",
14463 mode_cmd->pitches[0], stride_alignment); 14463 mode_cmd->pitches[0], stride_alignment);
14464 return -EINVAL; 14464 return -EINVAL;
14465 } 14465 }
14466 14466
14467 pitch_limit = intel_fb_pitch_limit(dev, mode_cmd->modifier[0], 14467 pitch_limit = intel_fb_pitch_limit(dev, mode_cmd->modifier[0],
14468 mode_cmd->pixel_format); 14468 mode_cmd->pixel_format);
14469 if (mode_cmd->pitches[0] > pitch_limit) { 14469 if (mode_cmd->pitches[0] > pitch_limit) {
14470 DRM_DEBUG("%s pitch (%u) must be at less than %d\n", 14470 DRM_DEBUG("%s pitch (%u) must be at less than %d\n",
14471 mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ? 14471 mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ?
14472 "tiled" : "linear", 14472 "tiled" : "linear",
14473 mode_cmd->pitches[0], pitch_limit); 14473 mode_cmd->pitches[0], pitch_limit);
14474 return -EINVAL; 14474 return -EINVAL;
14475 } 14475 }
14476 14476
14477 if (mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED && 14477 if (mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED &&
14478 mode_cmd->pitches[0] != obj->stride) { 14478 mode_cmd->pitches[0] != obj->stride) {
14479 DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n", 14479 DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
14480 mode_cmd->pitches[0], obj->stride); 14480 mode_cmd->pitches[0], obj->stride);
14481 return -EINVAL; 14481 return -EINVAL;
14482 } 14482 }
14483 14483
14484 /* Reject formats not supported by any plane early. */ 14484 /* Reject formats not supported by any plane early. */
14485 switch (mode_cmd->pixel_format) { 14485 switch (mode_cmd->pixel_format) {
14486 case DRM_FORMAT_C8: 14486 case DRM_FORMAT_C8:
14487 case DRM_FORMAT_RGB565: 14487 case DRM_FORMAT_RGB565:
14488 case DRM_FORMAT_XRGB8888: 14488 case DRM_FORMAT_XRGB8888:
14489 case DRM_FORMAT_ARGB8888: 14489 case DRM_FORMAT_ARGB8888:
14490 break; 14490 break;
14491 case DRM_FORMAT_XRGB1555: 14491 case DRM_FORMAT_XRGB1555:
14492 if (INTEL_INFO(dev)->gen > 3) { 14492 if (INTEL_INFO(dev)->gen > 3) {
14493 DRM_DEBUG("unsupported pixel format: %s\n", 14493 DRM_DEBUG("unsupported pixel format: %s\n",
14494 drm_get_format_name(mode_cmd->pixel_format)); 14494 drm_get_format_name(mode_cmd->pixel_format));
14495 return -EINVAL; 14495 return -EINVAL;
14496 } 14496 }
14497 break; 14497 break;
14498 case DRM_FORMAT_ABGR8888: 14498 case DRM_FORMAT_ABGR8888:
14499 if (!IS_VALLEYVIEW(dev) && INTEL_INFO(dev)->gen < 9) { 14499 if (!IS_VALLEYVIEW(dev) && INTEL_INFO(dev)->gen < 9) {
14500 DRM_DEBUG("unsupported pixel format: %s\n", 14500 DRM_DEBUG("unsupported pixel format: %s\n",
14501 drm_get_format_name(mode_cmd->pixel_format)); 14501 drm_get_format_name(mode_cmd->pixel_format));
14502 return -EINVAL; 14502 return -EINVAL;
14503 } 14503 }
14504 break; 14504 break;
14505 case DRM_FORMAT_XBGR8888: 14505 case DRM_FORMAT_XBGR8888:
14506 case DRM_FORMAT_XRGB2101010: 14506 case DRM_FORMAT_XRGB2101010:
14507 case DRM_FORMAT_XBGR2101010: 14507 case DRM_FORMAT_XBGR2101010:
14508 if (INTEL_INFO(dev)->gen < 4) { 14508 if (INTEL_INFO(dev)->gen < 4) {
14509 DRM_DEBUG("unsupported pixel format: %s\n", 14509 DRM_DEBUG("unsupported pixel format: %s\n",
14510 drm_get_format_name(mode_cmd->pixel_format)); 14510 drm_get_format_name(mode_cmd->pixel_format));
14511 return -EINVAL; 14511 return -EINVAL;
14512 } 14512 }
14513 break; 14513 break;
14514 case DRM_FORMAT_ABGR2101010: 14514 case DRM_FORMAT_ABGR2101010:
14515 if (!IS_VALLEYVIEW(dev)) { 14515 if (!IS_VALLEYVIEW(dev)) {
14516 DRM_DEBUG("unsupported pixel format: %s\n", 14516 DRM_DEBUG("unsupported pixel format: %s\n",
14517 drm_get_format_name(mode_cmd->pixel_format)); 14517 drm_get_format_name(mode_cmd->pixel_format));
14518 return -EINVAL; 14518 return -EINVAL;
14519 } 14519 }
14520 break; 14520 break;
14521 case DRM_FORMAT_YUYV: 14521 case DRM_FORMAT_YUYV:
14522 case DRM_FORMAT_UYVY: 14522 case DRM_FORMAT_UYVY:
14523 case DRM_FORMAT_YVYU: 14523 case DRM_FORMAT_YVYU:
14524 case DRM_FORMAT_VYUY: 14524 case DRM_FORMAT_VYUY:
14525 if (INTEL_INFO(dev)->gen < 5) { 14525 if (INTEL_INFO(dev)->gen < 5) {
14526 DRM_DEBUG("unsupported pixel format: %s\n", 14526 DRM_DEBUG("unsupported pixel format: %s\n",
14527 drm_get_format_name(mode_cmd->pixel_format)); 14527 drm_get_format_name(mode_cmd->pixel_format));
14528 return -EINVAL; 14528 return -EINVAL;
14529 } 14529 }
14530 break; 14530 break;
14531 default: 14531 default:
14532 DRM_DEBUG("unsupported pixel format: %s\n", 14532 DRM_DEBUG("unsupported pixel format: %s\n",
14533 drm_get_format_name(mode_cmd->pixel_format)); 14533 drm_get_format_name(mode_cmd->pixel_format));
14534 return -EINVAL; 14534 return -EINVAL;
14535 } 14535 }
14536 14536
14537 /* FIXME need to adjust LINOFF/TILEOFF accordingly. */ 14537 /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
14538 if (mode_cmd->offsets[0] != 0) 14538 if (mode_cmd->offsets[0] != 0)
14539 return -EINVAL; 14539 return -EINVAL;
14540 14540
14541 aligned_height = intel_fb_align_height(dev, mode_cmd->height, 14541 aligned_height = intel_fb_align_height(dev, mode_cmd->height,
14542 mode_cmd->pixel_format, 14542 mode_cmd->pixel_format,
14543 mode_cmd->modifier[0]); 14543 mode_cmd->modifier[0]);
14544 /* FIXME drm helper for size checks (especially planar formats)? */ 14544 /* FIXME drm helper for size checks (especially planar formats)? */
14545 if (obj->base.size < aligned_height * mode_cmd->pitches[0]) 14545 if (obj->base.size < aligned_height * mode_cmd->pitches[0])
14546 return -EINVAL; 14546 return -EINVAL;
14547 14547
14548 drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd); 14548 drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
14549 intel_fb->obj = obj; 14549 intel_fb->obj = obj;
14550 intel_fb->obj->framebuffer_references++; 14550 intel_fb->obj->framebuffer_references++;
14551 14551
14552 ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); 14552 ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
14553 if (ret) { 14553 if (ret) {
14554 DRM_ERROR("framebuffer init failed %d\n", ret); 14554 DRM_ERROR("framebuffer init failed %d\n", ret);
14555 return ret; 14555 return ret;
14556 } 14556 }
14557 14557
14558 return 0; 14558 return 0;
14559} 14559}
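
The size check above boils down to one invariant: the backing GEM object must be able to hold every scanline of the tile-aligned framebuffer at the requested pitch. A minimal standalone restatement of that arithmetic (helper name hypothetical, not part of the driver):

	/* Sketch only: the framebuffer fits if aligned_height full pitches
	 * do not exceed the size of the backing object. */
	static inline bool
	fb_fits_in_object(size_t obj_size, unsigned int aligned_height,
	    unsigned int pitch)
	{
		return (size_t)aligned_height * pitch <= obj_size;
	}
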
14560 14560
14561static struct drm_framebuffer * 14561static struct drm_framebuffer *
14562intel_user_framebuffer_create(struct drm_device *dev, 14562intel_user_framebuffer_create(struct drm_device *dev,
14563 struct drm_file *filp, 14563 struct drm_file *filp,
14564 struct drm_mode_fb_cmd2 *user_mode_cmd) 14564 struct drm_mode_fb_cmd2 *user_mode_cmd)
14565{ 14565{
14566 struct drm_gem_object *gobj; 14566 struct drm_gem_object *gobj;
14567 struct drm_i915_gem_object *obj; 14567 struct drm_i915_gem_object *obj;
14568 struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd; 14568 struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
14569 14569
14570 gobj = drm_gem_object_lookup(dev, filp, mode_cmd.handles[0]); 14570 gobj = drm_gem_object_lookup(dev, filp, mode_cmd.handles[0]);
14571 if (gobj == NULL) 14571 if (gobj == NULL)
14572 return ERR_PTR(-ENOENT); 14572 return ERR_PTR(-ENOENT);
14573 obj = to_intel_bo(gobj); 14573 obj = to_intel_bo(gobj);
14574 14574
14575 return intel_framebuffer_create(dev, &mode_cmd, obj); 14575 return intel_framebuffer_create(dev, &mode_cmd, obj);
14576} 14576}
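
For context, the drm_mode_fb_cmd2 fields validated above (GEM handle, fourcc, pitches, offsets, modifier) are supplied by userspace through the ADDFB2 ioctl, which DRM core routes to the .fb_create hook below. An illustrative userspace sketch using libdrm's drmModeAddFB2(); the handle and geometry values are assumed to come from earlier setup and are not taken from this driver:

	#include <stdint.h>
	#include <xf86drmMode.h>
	#include <drm_fourcc.h>

	/* Sketch: wrap an existing GEM/dumb buffer handle in a linear
	 * XRGB8888 framebuffer.  Returns 0 on success, a negative error
	 * code (e.g. from the checks above) on failure. */
	static int
	add_xrgb8888_fb(int fd, uint32_t bo_handle, uint32_t width,
	    uint32_t height, uint32_t pitch, uint32_t *fb_id)
	{
		uint32_t handles[4] = { bo_handle };
		uint32_t pitches[4] = { pitch };
		uint32_t offsets[4] = { 0 };	/* offsets[0] != 0 is rejected above */

		return drmModeAddFB2(fd, width, height, DRM_FORMAT_XRGB8888,
		    handles, pitches, offsets, fb_id, 0);
	}
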
14577 14577
14578#ifndef CONFIG_DRM_FBDEV_EMULATION 14578#ifndef CONFIG_DRM_FBDEV_EMULATION
14579static inline void intel_fbdev_output_poll_changed(struct drm_device *dev) 14579static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
14580{ 14580{
14581} 14581}
14582#endif 14582#endif
14583 14583
14584static const struct drm_mode_config_funcs intel_mode_funcs = { 14584static const struct drm_mode_config_funcs intel_mode_funcs = {
14585 .fb_create = intel_user_framebuffer_create, 14585 .fb_create = intel_user_framebuffer_create,
14586 .output_poll_changed = intel_fbdev_output_poll_changed, 14586 .output_poll_changed = intel_fbdev_output_poll_changed,
14587 .atomic_check = intel_atomic_check, 14587 .atomic_check = intel_atomic_check,
14588 .atomic_commit = intel_atomic_commit, 14588 .atomic_commit = intel_atomic_commit,
14589 .atomic_state_alloc = intel_atomic_state_alloc, 14589 .atomic_state_alloc = intel_atomic_state_alloc,
14590 .atomic_state_clear = intel_atomic_state_clear, 14590 .atomic_state_clear = intel_atomic_state_clear,
14591}; 14591};
14592 14592
14593/* Set up chip specific display functions */ 14593/* Set up chip specific display functions */
14594static void intel_init_display(struct drm_device *dev) 14594static void intel_init_display(struct drm_device *dev)
14595{ 14595{
14596 struct drm_i915_private *dev_priv = dev->dev_private; 14596 struct drm_i915_private *dev_priv = dev->dev_private;
14597 14597
14598 if (HAS_PCH_SPLIT(dev) || IS_G4X(dev)) 14598 if (HAS_PCH_SPLIT(dev) || IS_G4X(dev))
14599 dev_priv->display.find_dpll = g4x_find_best_dpll; 14599 dev_priv->display.find_dpll = g4x_find_best_dpll;
14600 else if (IS_CHERRYVIEW(dev)) 14600 else if (IS_CHERRYVIEW(dev))
14601 dev_priv->display.find_dpll = chv_find_best_dpll; 14601 dev_priv->display.find_dpll = chv_find_best_dpll;
14602 else if (IS_VALLEYVIEW(dev)) 14602 else if (IS_VALLEYVIEW(dev))
14603 dev_priv->display.find_dpll = vlv_find_best_dpll; 14603 dev_priv->display.find_dpll = vlv_find_best_dpll;
14604 else if (IS_PINEVIEW(dev)) 14604 else if (IS_PINEVIEW(dev))
14605 dev_priv->display.find_dpll = pnv_find_best_dpll; 14605 dev_priv->display.find_dpll = pnv_find_best_dpll;
14606 else 14606 else
14607 dev_priv->display.find_dpll = i9xx_find_best_dpll; 14607 dev_priv->display.find_dpll = i9xx_find_best_dpll;
14608 14608
14609 if (INTEL_INFO(dev)->gen >= 9) { 14609 if (INTEL_INFO(dev)->gen >= 9) {
14610 dev_priv->display.get_pipe_config = haswell_get_pipe_config; 14610 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
14611 dev_priv->display.get_initial_plane_config = 14611 dev_priv->display.get_initial_plane_config =
14612 skylake_get_initial_plane_config; 14612 skylake_get_initial_plane_config;
14613 dev_priv->display.crtc_compute_clock = 14613 dev_priv->display.crtc_compute_clock =
14614 haswell_crtc_compute_clock; 14614 haswell_crtc_compute_clock;
14615 dev_priv->display.crtc_enable = haswell_crtc_enable; 14615 dev_priv->display.crtc_enable = haswell_crtc_enable;
14616 dev_priv->display.crtc_disable = haswell_crtc_disable; 14616 dev_priv->display.crtc_disable = haswell_crtc_disable;
14617 dev_priv->display.update_primary_plane = 14617 dev_priv->display.update_primary_plane =
14618 skylake_update_primary_plane; 14618 skylake_update_primary_plane;
14619 } else if (HAS_DDI(dev)) { 14619 } else if (HAS_DDI(dev)) {
14620 dev_priv->display.get_pipe_config = haswell_get_pipe_config; 14620 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
14621 dev_priv->display.get_initial_plane_config = 14621 dev_priv->display.get_initial_plane_config =
14622 ironlake_get_initial_plane_config; 14622 ironlake_get_initial_plane_config;
14623 dev_priv->display.crtc_compute_clock = 14623 dev_priv->display.crtc_compute_clock =
14624 haswell_crtc_compute_clock; 14624 haswell_crtc_compute_clock;
14625 dev_priv->display.crtc_enable = haswell_crtc_enable; 14625 dev_priv->display.crtc_enable = haswell_crtc_enable;
14626 dev_priv->display.crtc_disable = haswell_crtc_disable; 14626 dev_priv->display.crtc_disable = haswell_crtc_disable;
14627 dev_priv->display.update_primary_plane = 14627 dev_priv->display.update_primary_plane =
14628 ironlake_update_primary_plane; 14628 ironlake_update_primary_plane;
14629 } else if (HAS_PCH_SPLIT(dev)) { 14629 } else if (HAS_PCH_SPLIT(dev)) {
14630 dev_priv->display.get_pipe_config = ironlake_get_pipe_config; 14630 dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
14631 dev_priv->display.get_initial_plane_config = 14631 dev_priv->display.get_initial_plane_config =
14632 ironlake_get_initial_plane_config; 14632 ironlake_get_initial_plane_config;
14633 dev_priv->display.crtc_compute_clock = 14633 dev_priv->display.crtc_compute_clock =
14634 ironlake_crtc_compute_clock; 14634 ironlake_crtc_compute_clock;
14635 dev_priv->display.crtc_enable = ironlake_crtc_enable; 14635 dev_priv->display.crtc_enable = ironlake_crtc_enable;
14636 dev_priv->display.crtc_disable = ironlake_crtc_disable; 14636 dev_priv->display.crtc_disable = ironlake_crtc_disable;
14637 dev_priv->display.update_primary_plane = 14637 dev_priv->display.update_primary_plane =
14638 ironlake_update_primary_plane; 14638 ironlake_update_primary_plane;
14639 } else if (IS_VALLEYVIEW(dev)) { 14639 } else if (IS_VALLEYVIEW(dev)) {
14640 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 14640 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14641 dev_priv->display.get_initial_plane_config = 14641 dev_priv->display.get_initial_plane_config =
14642 i9xx_get_initial_plane_config; 14642 i9xx_get_initial_plane_config;
14643 dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock; 14643 dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
14644 dev_priv->display.crtc_enable = valleyview_crtc_enable; 14644 dev_priv->display.crtc_enable = valleyview_crtc_enable;
14645 dev_priv->display.crtc_disable = i9xx_crtc_disable; 14645 dev_priv->display.crtc_disable = i9xx_crtc_disable;
14646 dev_priv->display.update_primary_plane = 14646 dev_priv->display.update_primary_plane =
14647 i9xx_update_primary_plane; 14647 i9xx_update_primary_plane;
14648 } else { 14648 } else {
14649 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 14649 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14650 dev_priv->display.get_initial_plane_config = 14650 dev_priv->display.get_initial_plane_config =
14651 i9xx_get_initial_plane_config; 14651 i9xx_get_initial_plane_config;
14652 dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock; 14652 dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
14653 dev_priv->display.crtc_enable = i9xx_crtc_enable; 14653 dev_priv->display.crtc_enable = i9xx_crtc_enable;
14654 dev_priv->display.crtc_disable = i9xx_crtc_disable; 14654 dev_priv->display.crtc_disable = i9xx_crtc_disable;
14655 dev_priv->display.update_primary_plane = 14655 dev_priv->display.update_primary_plane =
14656 i9xx_update_primary_plane; 14656 i9xx_update_primary_plane;
14657 } 14657 }
14658 14658
14659 /* Returns the core display clock speed */ 14659 /* Returns the core display clock speed */
14660 if (IS_SKYLAKE(dev)) 14660 if (IS_SKYLAKE(dev))
14661 dev_priv->display.get_display_clock_speed = 14661 dev_priv->display.get_display_clock_speed =
14662 skylake_get_display_clock_speed; 14662 skylake_get_display_clock_speed;
14663 else if (IS_BROXTON(dev)) 14663 else if (IS_BROXTON(dev))
14664 dev_priv->display.get_display_clock_speed = 14664 dev_priv->display.get_display_clock_speed =
14665 broxton_get_display_clock_speed; 14665 broxton_get_display_clock_speed;
14666 else if (IS_BROADWELL(dev)) 14666 else if (IS_BROADWELL(dev))
14667 dev_priv->display.get_display_clock_speed = 14667 dev_priv->display.get_display_clock_speed =
14668 broadwell_get_display_clock_speed; 14668 broadwell_get_display_clock_speed;
14669 else if (IS_HASWELL(dev)) 14669 else if (IS_HASWELL(dev))
14670 dev_priv->display.get_display_clock_speed = 14670 dev_priv->display.get_display_clock_speed =
14671 haswell_get_display_clock_speed; 14671 haswell_get_display_clock_speed;
14672 else if (IS_VALLEYVIEW(dev)) 14672 else if (IS_VALLEYVIEW(dev))
14673 dev_priv->display.get_display_clock_speed = 14673 dev_priv->display.get_display_clock_speed =
14674 valleyview_get_display_clock_speed; 14674 valleyview_get_display_clock_speed;
14675 else if (IS_GEN5(dev)) 14675 else if (IS_GEN5(dev))
14676 dev_priv->display.get_display_clock_speed = 14676 dev_priv->display.get_display_clock_speed =
14677 ilk_get_display_clock_speed; 14677 ilk_get_display_clock_speed;
14678 else if (IS_I945G(dev) || IS_BROADWATER(dev) || 14678 else if (IS_I945G(dev) || IS_BROADWATER(dev) ||
14679 IS_GEN6(dev) || IS_IVYBRIDGE(dev)) 14679 IS_GEN6(dev) || IS_IVYBRIDGE(dev))
14680 dev_priv->display.get_display_clock_speed = 14680 dev_priv->display.get_display_clock_speed =
14681 i945_get_display_clock_speed; 14681 i945_get_display_clock_speed;
14682 else if (IS_GM45(dev)) 14682 else if (IS_GM45(dev))
14683 dev_priv->display.get_display_clock_speed = 14683 dev_priv->display.get_display_clock_speed =
14684 gm45_get_display_clock_speed; 14684 gm45_get_display_clock_speed;
14685 else if (IS_CRESTLINE(dev)) 14685 else if (IS_CRESTLINE(dev))
14686 dev_priv->display.get_display_clock_speed = 14686 dev_priv->display.get_display_clock_speed =
14687 i965gm_get_display_clock_speed; 14687 i965gm_get_display_clock_speed;
14688 else if (IS_PINEVIEW(dev)) 14688 else if (IS_PINEVIEW(dev))
14689 dev_priv->display.get_display_clock_speed = 14689 dev_priv->display.get_display_clock_speed =
14690 pnv_get_display_clock_speed; 14690 pnv_get_display_clock_speed;
14691 else if (IS_G33(dev) || IS_G4X(dev)) 14691 else if (IS_G33(dev) || IS_G4X(dev))
14692 dev_priv->display.get_display_clock_speed = 14692 dev_priv->display.get_display_clock_speed =
14693 g33_get_display_clock_speed; 14693 g33_get_display_clock_speed;
14694 else if (IS_I915G(dev)) 14694 else if (IS_I915G(dev))
14695 dev_priv->display.get_display_clock_speed = 14695 dev_priv->display.get_display_clock_speed =
14696 i915_get_display_clock_speed; 14696 i915_get_display_clock_speed;
14697 else if (IS_I945GM(dev) || IS_845G(dev)) 14697 else if (IS_I945GM(dev) || IS_845G(dev))
14698 dev_priv->display.get_display_clock_speed = 14698 dev_priv->display.get_display_clock_speed =
14699 i9xx_misc_get_display_clock_speed; 14699 i9xx_misc_get_display_clock_speed;
14700 else if (IS_PINEVIEW(dev)) 14700 else if (IS_PINEVIEW(dev))
14701 dev_priv->display.get_display_clock_speed = 14701 dev_priv->display.get_display_clock_speed =
14702 pnv_get_display_clock_speed; 14702 pnv_get_display_clock_speed;
14703 else if (IS_I915GM(dev)) 14703 else if (IS_I915GM(dev))
14704 dev_priv->display.get_display_clock_speed = 14704 dev_priv->display.get_display_clock_speed =
14705 i915gm_get_display_clock_speed; 14705 i915gm_get_display_clock_speed;
14706 else if (IS_I865G(dev)) 14706 else if (IS_I865G(dev))
14707 dev_priv->display.get_display_clock_speed = 14707 dev_priv->display.get_display_clock_speed =
14708 i865_get_display_clock_speed; 14708 i865_get_display_clock_speed;
14709 else if (IS_I85X(dev)) 14709 else if (IS_I85X(dev))
14710 dev_priv->display.get_display_clock_speed = 14710 dev_priv->display.get_display_clock_speed =
14711 i85x_get_display_clock_speed; 14711 i85x_get_display_clock_speed;
14712 else { /* 830 */ 14712 else { /* 830 */
14713 WARN(!IS_I830(dev), "Unknown platform. Assuming 133 MHz CDCLK\n"); 14713 WARN(!IS_I830(dev), "Unknown platform. Assuming 133 MHz CDCLK\n");
14714 dev_priv->display.get_display_clock_speed = 14714 dev_priv->display.get_display_clock_speed =
14715 i830_get_display_clock_speed; 14715 i830_get_display_clock_speed;
14716 } 14716 }
14717 14717
14718 if (IS_GEN5(dev)) { 14718 if (IS_GEN5(dev)) {
14719 dev_priv->display.fdi_link_train = ironlake_fdi_link_train; 14719 dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
14720 } else if (IS_GEN6(dev)) { 14720 } else if (IS_GEN6(dev)) {
14721 dev_priv->display.fdi_link_train = gen6_fdi_link_train; 14721 dev_priv->display.fdi_link_train = gen6_fdi_link_train;
14722 } else if (IS_IVYBRIDGE(dev)) { 14722 } else if (IS_IVYBRIDGE(dev)) {
14723 /* FIXME: detect B0+ stepping and use auto training */ 14723 /* FIXME: detect B0+ stepping and use auto training */
14724 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; 14724 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
14725 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { 14725 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
14726 dev_priv->display.fdi_link_train = hsw_fdi_link_train; 14726 dev_priv->display.fdi_link_train = hsw_fdi_link_train;
14727 if (IS_BROADWELL(dev)) { 14727 if (IS_BROADWELL(dev)) {
14728 dev_priv->display.modeset_commit_cdclk = 14728 dev_priv->display.modeset_commit_cdclk =
14729 broadwell_modeset_commit_cdclk; 14729 broadwell_modeset_commit_cdclk;
14730 dev_priv->display.modeset_calc_cdclk = 14730 dev_priv->display.modeset_calc_cdclk =
14731 broadwell_modeset_calc_cdclk; 14731 broadwell_modeset_calc_cdclk;
14732 } 14732 }
14733 } else if (IS_VALLEYVIEW(dev)) { 14733 } else if (IS_VALLEYVIEW(dev)) {
14734 dev_priv->display.modeset_commit_cdclk = 14734 dev_priv->display.modeset_commit_cdclk =
14735 valleyview_modeset_commit_cdclk; 14735 valleyview_modeset_commit_cdclk;
14736 dev_priv->display.modeset_calc_cdclk = 14736 dev_priv->display.modeset_calc_cdclk =
14737 valleyview_modeset_calc_cdclk; 14737 valleyview_modeset_calc_cdclk;
14738 } else if (IS_BROXTON(dev)) { 14738 } else if (IS_BROXTON(dev)) {
14739 dev_priv->display.modeset_commit_cdclk = 14739 dev_priv->display.modeset_commit_cdclk =
14740 broxton_modeset_commit_cdclk; 14740 broxton_modeset_commit_cdclk;
14741 dev_priv->display.modeset_calc_cdclk = 14741 dev_priv->display.modeset_calc_cdclk =
14742 broxton_modeset_calc_cdclk; 14742 broxton_modeset_calc_cdclk;
14743 } 14743 }
14744 14744
14745 switch (INTEL_INFO(dev)->gen) { 14745 switch (INTEL_INFO(dev)->gen) {
14746 case 2: 14746 case 2:
14747 dev_priv->display.queue_flip = intel_gen2_queue_flip; 14747 dev_priv->display.queue_flip = intel_gen2_queue_flip;
14748 break; 14748 break;
14749 14749
14750 case 3: 14750 case 3:
14751 dev_priv->display.queue_flip = intel_gen3_queue_flip; 14751 dev_priv->display.queue_flip = intel_gen3_queue_flip;
14752 break; 14752 break;
14753 14753
14754 case 4: 14754 case 4:
14755 case 5: 14755 case 5:
14756 dev_priv->display.queue_flip = intel_gen4_queue_flip; 14756 dev_priv->display.queue_flip = intel_gen4_queue_flip;
14757 break; 14757 break;
14758 14758
14759 case 6: 14759 case 6:
14760 dev_priv->display.queue_flip = intel_gen6_queue_flip; 14760 dev_priv->display.queue_flip = intel_gen6_queue_flip;
14761 break; 14761 break;
14762 case 7: 14762 case 7:
14763 case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */ 14763 case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
14764 dev_priv->display.queue_flip = intel_gen7_queue_flip; 14764 dev_priv->display.queue_flip = intel_gen7_queue_flip;
14765 break; 14765 break;
14766 case 9: 14766 case 9:
14767 /* Drop through - unsupported since execlist only. */ 14767 /* Drop through - unsupported since execlist only. */
14768 default: 14768 default:
14769 /* Default just returns -ENODEV to indicate unsupported */ 14769 /* Default just returns -ENODEV to indicate unsupported */
14770 dev_priv->display.queue_flip = intel_default_queue_flip; 14770 dev_priv->display.queue_flip = intel_default_queue_flip;
14771 } 14771 }
14772 14772
 14773#ifdef __NetBSD__
 14774 linux_mutex_init(&dev_priv->pps_mutex);
 14775#else
14773 mutex_init(&dev_priv->pps_mutex); 14776 mutex_init(&dev_priv->pps_mutex);
 14777#endif
14774} 14778}
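
intel_init_display() is pure table setup: it picks the per-platform implementations once and stores them in dev_priv->display, and the rest of the modeset code dispatches through those pointers instead of branching on IS_GEN*()/IS_*() at every call site. A minimal sketch of the calling side (wrapper name hypothetical; the hook signature is assumed to match this kernel's struct drm_i915_display_funcs):

	/* Sketch: dispatch through the hook chosen in intel_init_display(). */
	static void
	example_enable_crtc(struct drm_i915_private *dev_priv,
	    struct drm_crtc *crtc)
	{
		if (dev_priv->display.crtc_enable != NULL)
			dev_priv->display.crtc_enable(crtc);
	}

The only functional change in this hunk is at the end of the function: the NetBSD build initializes pps_mutex through the drm2 compat wrapper linux_mutex_init() rather than the one-argument Linux mutex_init(), presumably because that name collides with NetBSD's native three-argument mutex_init(9).
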
14775 14779
14776/* 14780/*
14777 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend, 14781 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
14778 * resume, or other times. This quirk makes sure that's the case for 14782 * resume, or other times. This quirk makes sure that's the case for
14779 * affected systems. 14783 * affected systems.
14780 */ 14784 */
14781static void quirk_pipea_force(struct drm_device *dev) 14785static void quirk_pipea_force(struct drm_device *dev)
14782{ 14786{
14783 struct drm_i915_private *dev_priv = dev->dev_private; 14787 struct drm_i915_private *dev_priv = dev->dev_private;
14784 14788
14785 dev_priv->quirks |= QUIRK_PIPEA_FORCE; 14789 dev_priv->quirks |= QUIRK_PIPEA_FORCE;
14786 DRM_INFO("applying pipe a force quirk\n"); 14790 DRM_INFO("applying pipe a force quirk\n");
14787} 14791}
14788 14792
14789static void quirk_pipeb_force(struct drm_device *dev) 14793static void quirk_pipeb_force(struct drm_device *dev)
14790{ 14794{
14791 struct drm_i915_private *dev_priv = dev->dev_private; 14795 struct drm_i915_private *dev_priv = dev->dev_private;
14792 14796
14793 dev_priv->quirks |= QUIRK_PIPEB_FORCE; 14797 dev_priv->quirks |= QUIRK_PIPEB_FORCE;
14794 DRM_INFO("applying pipe b force quirk\n"); 14798 DRM_INFO("applying pipe b force quirk\n");
14795} 14799}
14796 14800
14797/* 14801/*
14798 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason 14802 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
14799 */ 14803 */
14800static void quirk_ssc_force_disable(struct drm_device *dev) 14804static void quirk_ssc_force_disable(struct drm_device *dev)
14801{ 14805{
14802 struct drm_i915_private *dev_priv = dev->dev_private; 14806 struct drm_i915_private *dev_priv = dev->dev_private;
14803 dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE; 14807 dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
14804 DRM_INFO("applying lvds SSC disable quirk\n"); 14808 DRM_INFO("applying lvds SSC disable quirk\n");
14805} 14809}
14806 14810
14807/* 14811/*
14808 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight 14812 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
14809 * brightness value 14813 * brightness value
14810 */ 14814 */
14811static void quirk_invert_brightness(struct drm_device *dev) 14815static void quirk_invert_brightness(struct drm_device *dev)
14812{ 14816{
14813 struct drm_i915_private *dev_priv = dev->dev_private; 14817 struct drm_i915_private *dev_priv = dev->dev_private;
14814 dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS; 14818 dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
14815 DRM_INFO("applying inverted panel brightness quirk\n"); 14819 DRM_INFO("applying inverted panel brightness quirk\n");
14816} 14820}
14817 14821
14818/* Some VBTs incorrectly indicate no backlight is present */ 14822/* Some VBTs incorrectly indicate no backlight is present */
14819static void quirk_backlight_present(struct drm_device *dev) 14823static void quirk_backlight_present(struct drm_device *dev)
14820{ 14824{
14821 struct drm_i915_private *dev_priv = dev->dev_private; 14825 struct drm_i915_private *dev_priv = dev->dev_private;
14822 dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT; 14826 dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
14823 DRM_INFO("applying backlight present quirk\n"); 14827 DRM_INFO("applying backlight present quirk\n");
14824} 14828}
14825 14829
14826struct intel_quirk { 14830struct intel_quirk {
14827 int device; 14831 int device;
14828 int subsystem_vendor; 14832 int subsystem_vendor;
14829 int subsystem_device; 14833 int subsystem_device;
14830 void (*hook)(struct drm_device *dev); 14834 void (*hook)(struct drm_device *dev);
14831}; 14835};
14832 14836
14833/* For systems that don't have a meaningful PCI subdevice/subvendor ID */ 14837/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
14834struct intel_dmi_quirk { 14838struct intel_dmi_quirk {
14835 void (*hook)(struct drm_device *dev); 14839 void (*hook)(struct drm_device *dev);
14836 const struct dmi_system_id (*dmi_id_list)[]; 14840 const struct dmi_system_id (*dmi_id_list)[];
14837}; 14841};
14838 14842
14839static int intel_dmi_reverse_brightness(const struct dmi_system_id *id) 14843static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
14840{ 14844{
14841 DRM_INFO("Backlight polarity reversed on %s\n", id->ident); 14845 DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
14842 return 1; 14846 return 1;
14843} 14847}
14844 14848
14845static const struct intel_dmi_quirk intel_dmi_quirks[] = { 14849static const struct intel_dmi_quirk intel_dmi_quirks[] = {
14846 { 14850 {
14847 .dmi_id_list = &(const struct dmi_system_id[]) { 14851 .dmi_id_list = &(const struct dmi_system_id[]) {
14848 { 14852 {
14849 .callback = intel_dmi_reverse_brightness, 14853 .callback = intel_dmi_reverse_brightness,
14850 .ident = "NCR Corporation", 14854 .ident = "NCR Corporation",
14851 .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"), 14855 .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
14852 DMI_MATCH(DMI_PRODUCT_NAME, ""), 14856 DMI_MATCH(DMI_PRODUCT_NAME, ""),
14853 }, 14857 },
14854 }, 14858 },
14855 { .callback = NULL } /* terminating entry */ 14859 { .callback = NULL } /* terminating entry */
14856 }, 14860 },
14857 .hook = quirk_invert_brightness, 14861 .hook = quirk_invert_brightness,
14858 }, 14862 },
14859}; 14863};
14860 14864
14861static struct intel_quirk intel_quirks[] = { 14865static struct intel_quirk intel_quirks[] = {
14862 /* Toshiba Protege R-205, S-209 needs pipe A force quirk */ 14866 /* Toshiba Protege R-205, S-209 needs pipe A force quirk */
14863 { 0x2592, 0x1179, 0x0001, quirk_pipea_force }, 14867 { 0x2592, 0x1179, 0x0001, quirk_pipea_force },
14864 14868
14865 /* ThinkPad T60 needs pipe A force quirk (bug #16494) */ 14869 /* ThinkPad T60 needs pipe A force quirk (bug #16494) */
14866 { 0x2782, 0x17aa, 0x201a, quirk_pipea_force }, 14870 { 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
14867 14871
14868 /* 830 needs to leave pipe A & dpll A up */ 14872 /* 830 needs to leave pipe A & dpll A up */
14869 { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, 14873 { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
14870 14874
14871 /* 830 needs to leave pipe B & dpll B up */ 14875 /* 830 needs to leave pipe B & dpll B up */
14872 { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipeb_force }, 14876 { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipeb_force },
14873 14877
14874 /* Lenovo U160 cannot use SSC on LVDS */ 14878 /* Lenovo U160 cannot use SSC on LVDS */
14875 { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable }, 14879 { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
14876 14880
14877 /* Sony Vaio Y cannot use SSC on LVDS */ 14881 /* Sony Vaio Y cannot use SSC on LVDS */
14878 { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable }, 14882 { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
14879 14883
14880 /* Acer Aspire 5734Z must invert backlight brightness */ 14884 /* Acer Aspire 5734Z must invert backlight brightness */
14881 { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness }, 14885 { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
14882 14886
14883 /* Acer/eMachines G725 */ 14887 /* Acer/eMachines G725 */
14884 { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness }, 14888 { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
14885 14889
14886 /* Acer/eMachines e725 */ 14890 /* Acer/eMachines e725 */
14887 { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness }, 14891 { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
14888 14892
14889 /* Acer/Packard Bell NCL20 */ 14893 /* Acer/Packard Bell NCL20 */
14890 { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness }, 14894 { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
14891 14895
14892 /* Acer Aspire 4736Z */ 14896 /* Acer Aspire 4736Z */
14893 { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness }, 14897 { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
14894 14898
14895 /* Acer Aspire 5336 */ 14899 /* Acer Aspire 5336 */
14896 { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness }, 14900 { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
14897 14901
14898 /* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */ 14902 /* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
14899 { 0x0a06, 0x1025, 0x0a11, quirk_backlight_present }, 14903 { 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
14900 14904
14901 /* Acer C720 Chromebook (Core i3 4005U) */ 14905 /* Acer C720 Chromebook (Core i3 4005U) */
14902 { 0x0a16, 0x1025, 0x0a11, quirk_backlight_present }, 14906 { 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },
14903 14907
14904 /* Apple Macbook 2,1 (Core 2 T7400) */ 14908 /* Apple Macbook 2,1 (Core 2 T7400) */
14905 { 0x27a2, 0x8086, 0x7270, quirk_backlight_present }, 14909 { 0x27a2, 0x8086, 0x7270, quirk_backlight_present },
14906 14910
14907 /* Apple Macbook 4,1 */ 14911 /* Apple Macbook 4,1 */
14908 { 0x2a02, 0x106b, 0x00a1, quirk_backlight_present }, 14912 { 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },
14909 14913
14910 /* Toshiba CB35 Chromebook (Celeron 2955U) */ 14914 /* Toshiba CB35 Chromebook (Celeron 2955U) */
14911 { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present }, 14915 { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
14912 14916
14913 /* HP Chromebook 14 (Celeron 2955U) */ 14917 /* HP Chromebook 14 (Celeron 2955U) */
14914 { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present }, 14918 { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
14915 14919
14916 /* Dell Chromebook 11 */ 14920 /* Dell Chromebook 11 */
14917 { 0x0a06, 0x1028, 0x0a35, quirk_backlight_present }, 14921 { 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },
14918 14922
14919 /* Dell Chromebook 11 (2015 version) */ 14923 /* Dell Chromebook 11 (2015 version) */
14920 { 0x0a16, 0x1028, 0x0a35, quirk_backlight_present }, 14924 { 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
14921}; 14925};
14922 14926
14923static void intel_init_quirks(struct drm_device *dev) 14927static void intel_init_quirks(struct drm_device *dev)
14924{ 14928{
14925 struct pci_dev *d = dev->pdev; 14929 struct pci_dev *d = dev->pdev;
14926 int i; 14930 int i;
14927 14931
14928 for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) { 14932 for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
14929 struct intel_quirk *q = &intel_quirks[i]; 14933 struct intel_quirk *q = &intel_quirks[i];
14930 14934
14931 if (d->device == q->device && 14935 if (d->device == q->device &&
14932 (d->subsystem_vendor == q->subsystem_vendor || 14936 (d->subsystem_vendor == q->subsystem_vendor ||
14933 q->subsystem_vendor == PCI_ANY_ID) && 14937 q->subsystem_vendor == PCI_ANY_ID) &&
14934 (d->subsystem_device == q->subsystem_device || 14938 (d->subsystem_device == q->subsystem_device ||
14935 q->subsystem_device == PCI_ANY_ID)) 14939 q->subsystem_device == PCI_ANY_ID))
14936 q->hook(dev); 14940 q->hook(dev);
14937 } 14941 }
14938 for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) { 14942 for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
14939 if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0) 14943 if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
14940 intel_dmi_quirks[i].hook(dev); 14944 intel_dmi_quirks[i].hook(dev);
14941 } 14945 }
14942} 14946}
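
intel_init_quirks() fires a hook whenever the PCI device ID matches an intel_quirks[] entry and each subsystem field either matches or is wildcarded with PCI_ANY_ID; DMI-keyed quirks are handled separately via dmi_check_system(). The PCI matching rule, pulled out into a standalone predicate purely for illustration (helper name hypothetical):

	/* Sketch of the match performed in the loop above. */
	static bool
	intel_quirk_matches(const struct intel_quirk *q, const struct pci_dev *d)
	{
		return d->device == q->device &&
		    (q->subsystem_vendor == PCI_ANY_ID ||
			d->subsystem_vendor == q->subsystem_vendor) &&
		    (q->subsystem_device == PCI_ANY_ID ||
			d->subsystem_device == q->subsystem_device);
	}
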
14943 14947
14944/* Disable the VGA plane that we never use */ 14948/* Disable the VGA plane that we never use */
14945void i915_disable_vga(struct drm_device *dev) 14949void i915_disable_vga(struct drm_device *dev)
14946{ 14950{
14947 struct drm_i915_private *dev_priv = dev->dev_private; 14951 struct drm_i915_private *dev_priv = dev->dev_private;
14948 u8 sr1; 14952 u8 sr1;
14949 u32 vga_reg = i915_vgacntrl_reg(dev); 14953 u32 vga_reg = i915_vgacntrl_reg(dev);
14950 14954
14951#ifdef __NetBSD__ 14955#ifdef __NetBSD__
14952 { 14956 {
14953 const bus_addr_t vgabase = 0x3c0; 14957 const bus_addr_t vgabase = 0x3c0;
14954 const bus_space_tag_t iot = dev->pdev->pd_pa.pa_iot; 14958 const bus_space_tag_t iot = dev->pdev->pd_pa.pa_iot;
14955 bus_space_handle_t ioh; 14959 bus_space_handle_t ioh;
14956 int error; 14960 int error;
14957 14961
14958 error = bus_space_map(iot, vgabase, 0x10, 0, &ioh); 14962 error = bus_space_map(iot, vgabase, 0x10, 0, &ioh);
14959 if (error) { 14963 if (error) {
14960 aprint_error_dev(dev->pdev->pd_dev, 14964 aprint_error_dev(dev->pdev->pd_dev,
14961 "unable to map VGA registers: %d\n", error); 14965 "unable to map VGA registers: %d\n", error);
14962 } else { 14966 } else {
14963 CTASSERT(vgabase <= VGA_SR_INDEX); 14967 CTASSERT(vgabase <= VGA_SR_INDEX);
14964 CTASSERT(vgabase <= VGA_SR_DATA); 14968 CTASSERT(vgabase <= VGA_SR_DATA);
14965 bus_space_write_1(iot, ioh, VGA_SR_INDEX - vgabase, SR01); 14969 bus_space_write_1(iot, ioh, VGA_SR_INDEX - vgabase, SR01);
14966 sr1 = bus_space_read_1(iot, ioh, VGA_SR_DATA - vgabase); 14970 sr1 = bus_space_read_1(iot, ioh, VGA_SR_DATA - vgabase);
14967 bus_space_write_1(iot, ioh, VGA_SR_DATA - vgabase, 14971 bus_space_write_1(iot, ioh, VGA_SR_DATA - vgabase,
14968 (sr1 | __BIT(5))); 14972 (sr1 | __BIT(5)));
14969 bus_space_unmap(iot, ioh, 0x10); 14973 bus_space_unmap(iot, ioh, 0x10);
14970 } 14974 }
14971 } 14975 }
14972#else 14976#else
14973 /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */ 14977 /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
14974 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO); 14978 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
14975 outb(SR01, VGA_SR_INDEX); 14979 outb(SR01, VGA_SR_INDEX);
14976 sr1 = inb(VGA_SR_DATA); 14980 sr1 = inb(VGA_SR_DATA);
14977 outb(sr1 | 1<<5, VGA_SR_DATA); 14981 outb(sr1 | 1<<5, VGA_SR_DATA);
14978 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); 14982 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
14979#endif 14983#endif
14980 udelay(300); 14984 udelay(300);
 14985#endif
14981 14986
14982 I915_WRITE(vga_reg, VGA_DISP_DISABLE); 14987 I915_WRITE(vga_reg, VGA_DISP_DISABLE);
14983 POSTING_READ(vga_reg); 14988 POSTING_READ(vga_reg);
14984} 14989}
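
Both branches of i915_disable_vga() perform the same register dance: select VGA sequencer register SR01 through the index port and set bit 5 (screen off) before the VGA display plane itself is disabled via VGACNTRL. The NetBSD path simply reaches the legacy 0x3c0 I/O range through a temporary bus_space(9) mapping instead of Linux's direct outb()/inb() under VGA arbitration. Reduced to its core, assuming an already-mapped I/O window:

	/* Sketch: read-modify-write of sequencer register SR01 to set the
	 * screen-off bit, relative to a mapped VGA I/O window at vgabase. */
	static void
	vga_sr01_screen_off(bus_space_tag_t iot, bus_space_handle_t ioh,
	    bus_addr_t vgabase)
	{
		uint8_t sr1;

		bus_space_write_1(iot, ioh, VGA_SR_INDEX - vgabase, SR01);
		sr1 = bus_space_read_1(iot, ioh, VGA_SR_DATA - vgabase);
		bus_space_write_1(iot, ioh, VGA_SR_DATA - vgabase, sr1 | __BIT(5));
	}
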
14985 14990
14986void intel_modeset_init_hw(struct drm_device *dev) 14991void intel_modeset_init_hw(struct drm_device *dev)
14987{ 14992{
14988 intel_update_cdclk(dev); 14993 intel_update_cdclk(dev);
14989 intel_prepare_ddi(dev); 14994 intel_prepare_ddi(dev);
14990 intel_init_clock_gating(dev); 14995 intel_init_clock_gating(dev);
14991 intel_enable_gt_powersave(dev); 14996 intel_enable_gt_powersave(dev);
14992} 14997}
14993 14998
14994void intel_modeset_init(struct drm_device *dev) 14999void intel_modeset_init(struct drm_device *dev)
14995{ 15000{
14996 struct drm_i915_private *dev_priv = dev->dev_private; 15001 struct drm_i915_private *dev_priv = dev->dev_private;
14997 int sprite, ret; 15002 int sprite, ret;
14998 enum i915_pipe pipe; 15003 enum i915_pipe pipe;
14999 struct intel_crtc *crtc; 15004 struct intel_crtc *crtc;
15000 15005
15001 drm_mode_config_init(dev); 15006 drm_mode_config_init(dev);
15002 15007
15003 dev->mode_config.min_width = 0; 15008 dev->mode_config.min_width = 0;
15004 dev->mode_config.min_height = 0; 15009 dev->mode_config.min_height = 0;
15005 15010
15006 dev->mode_config.preferred_depth = 24; 15011 dev->mode_config.preferred_depth = 24;
15007 dev->mode_config.prefer_shadow = 1; 15012 dev->mode_config.prefer_shadow = 1;
15008 15013
15009 dev->mode_config.allow_fb_modifiers = true; 15014 dev->mode_config.allow_fb_modifiers = true;
15010 15015
15011 dev->mode_config.funcs = &intel_mode_funcs; 15016 dev->mode_config.funcs = &intel_mode_funcs;
15012 15017
15013 intel_init_quirks(dev); 15018 intel_init_quirks(dev);
15014 15019
15015 intel_init_pm(dev); 15020 intel_init_pm(dev);
15016 15021
15017 if (INTEL_INFO(dev)->num_pipes == 0) 15022 if (INTEL_INFO(dev)->num_pipes == 0)
15018 return; 15023 return;
15019 15024
15020 /* 15025 /*
15021 * There may be no VBT; and if the BIOS enabled SSC we can 15026 * There may be no VBT; and if the BIOS enabled SSC we can
15022 * just keep using it to avoid unnecessary flicker. Whereas if the 15027 * just keep using it to avoid unnecessary flicker. Whereas if the
15023 * BIOS isn't using it, don't assume it will work even if the VBT 15028 * BIOS isn't using it, don't assume it will work even if the VBT
15024 * indicates as much. 15029 * indicates as much.
15025 */ 15030 */
15026 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { 15031 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
15027 bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) & 15032 bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
15028 DREF_SSC1_ENABLE); 15033 DREF_SSC1_ENABLE);
15029 15034
15030 if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) { 15035 if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
15031 DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n", 15036 DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
15032 bios_lvds_use_ssc ? "en" : "dis", 15037 bios_lvds_use_ssc ? "en" : "dis",
15033 dev_priv->vbt.lvds_use_ssc ? "en" : "dis"); 15038 dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
15034 dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc; 15039 dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
15035 } 15040 }
15036 } 15041 }
15037 15042
15038 intel_init_display(dev); 15043 intel_init_display(dev);
15039 intel_init_audio(dev); 15044 intel_init_audio(dev);
15040 15045
15041 if (IS_GEN2(dev)) { 15046 if (IS_GEN2(dev)) {
15042 dev->mode_config.max_width = 2048; 15047 dev->mode_config.max_width = 2048;
15043 dev->mode_config.max_height = 2048; 15048 dev->mode_config.max_height = 2048;
15044 } else if (IS_GEN3(dev)) { 15049 } else if (IS_GEN3(dev)) {
15045 dev->mode_config.max_width = 4096; 15050 dev->mode_config.max_width = 4096;
15046 dev->mode_config.max_height = 4096; 15051 dev->mode_config.max_height = 4096;
15047 } else { 15052 } else {
15048 dev->mode_config.max_width = 8192; 15053 dev->mode_config.max_width = 8192;
15049 dev->mode_config.max_height = 8192; 15054 dev->mode_config.max_height = 8192;
15050 } 15055 }
15051 15056
15052 if (IS_845G(dev) || IS_I865G(dev)) { 15057 if (IS_845G(dev) || IS_I865G(dev)) {
15053 dev->mode_config.cursor_width = IS_845G(dev) ? 64 : 512; 15058 dev->mode_config.cursor_width = IS_845G(dev) ? 64 : 512;
15054 dev->mode_config.cursor_height = 1023; 15059 dev->mode_config.cursor_height = 1023;
15055 } else if (IS_GEN2(dev)) { 15060 } else if (IS_GEN2(dev)) {
15056 dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH; 15061 dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
15057 dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT; 15062 dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
15058 } else { 15063 } else {
15059 dev->mode_config.cursor_width = MAX_CURSOR_WIDTH; 15064 dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
15060 dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT; 15065 dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
15061 } 15066 }
15062 15067
15063 dev->mode_config.fb_base = dev_priv->gtt.mappable_base; 15068 dev->mode_config.fb_base = dev_priv->gtt.mappable_base;
15064 15069
15065 DRM_DEBUG_KMS("%d display pipe%s available.\n", 15070 DRM_DEBUG_KMS("%d display pipe%s available.\n",
15066 INTEL_INFO(dev)->num_pipes, 15071 INTEL_INFO(dev)->num_pipes,
15067 INTEL_INFO(dev)->num_pipes > 1 ? "s" : ""); 15072 INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
15068 15073
15069 for_each_pipe(dev_priv, pipe) { 15074 for_each_pipe(dev_priv, pipe) {
15070 intel_crtc_init(dev, pipe); 15075 intel_crtc_init(dev, pipe);
15071 for_each_sprite(dev_priv, pipe, sprite) { 15076 for_each_sprite(dev_priv, pipe, sprite) {
15072 ret = intel_plane_init(dev, pipe, sprite); 15077 ret = intel_plane_init(dev, pipe, sprite);
15073 if (ret) 15078 if (ret)
15074 DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n", 15079 DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
15075 pipe_name(pipe), sprite_name(pipe, sprite), ret); 15080 pipe_name(pipe), sprite_name(pipe, sprite), ret);
15076 } 15081 }
15077 } 15082 }
15078 15083
15079 intel_update_czclk(dev_priv); 15084 intel_update_czclk(dev_priv);
15080 intel_update_cdclk(dev); 15085 intel_update_cdclk(dev);
15081 15086
15082 intel_shared_dpll_init(dev); 15087 intel_shared_dpll_init(dev);
15083 15088
15084#ifndef __NetBSD__ /* XXX We wait until intelfb is ready. */ 15089#ifndef __NetBSD__ /* XXX We wait until intelfb is ready. */
15085 /* Just disable it once at startup */ 15090 /* Just disable it once at startup */
15086 i915_disable_vga(dev); 15091 i915_disable_vga(dev);
15087#endif 15092#endif
15088 intel_setup_outputs(dev); 15093 intel_setup_outputs(dev);
15089 15094
15090 /* Just in case the BIOS is doing something questionable. */ 15095 /* Just in case the BIOS is doing something questionable. */
15091 intel_fbc_disable(dev_priv); 15096 intel_fbc_disable(dev_priv);
15092 15097
15093 drm_modeset_lock_all(dev); 15098 drm_modeset_lock_all(dev);
15094 intel_modeset_setup_hw_state(dev); 15099 intel_modeset_setup_hw_state(dev);
15095 drm_modeset_unlock_all(dev); 15100 drm_modeset_unlock_all(dev);
15096 15101
15097 for_each_intel_crtc(dev, crtc) { 15102 for_each_intel_crtc(dev, crtc) {
15098 struct intel_initial_plane_config plane_config = {}; 15103 struct intel_initial_plane_config plane_config = {};
15099 15104
15100 if (!crtc->active) 15105 if (!crtc->active)
15101 continue; 15106 continue;
15102 15107
15103 /* 15108 /*
15104 * Note that reserving the BIOS fb up front prevents us 15109 * Note that reserving the BIOS fb up front prevents us
15105 * from stuffing other stolen allocations like the ring 15110 * from stuffing other stolen allocations like the ring
15106 * on top. This prevents some ugliness at boot time, and 15111 * on top. This prevents some ugliness at boot time, and
15107 * can even allow for smooth boot transitions if the BIOS 15112 * can even allow for smooth boot transitions if the BIOS
15108 * fb is large enough for the active pipe configuration. 15113 * fb is large enough for the active pipe configuration.
15109 */ 15114 */
15110 dev_priv->display.get_initial_plane_config(crtc, 15115 dev_priv->display.get_initial_plane_config(crtc,
15111 &plane_config); 15116 &plane_config);
15112 15117
15113 /* 15118 /*
15114 * If the fb is shared between multiple heads, we'll 15119 * If the fb is shared between multiple heads, we'll
15115 * just get the first one. 15120 * just get the first one.
15116 */ 15121 */
15117 intel_find_initial_plane_obj(crtc, &plane_config); 15122 intel_find_initial_plane_obj(crtc, &plane_config);
15118 } 15123 }
15119} 15124}
15120 15125
15121static void intel_enable_pipe_a(struct drm_device *dev) 15126static void intel_enable_pipe_a(struct drm_device *dev)
15122{ 15127{
15123 struct intel_connector *connector; 15128 struct intel_connector *connector;
15124 struct drm_connector *crt = NULL; 15129 struct drm_connector *crt = NULL;
15125 struct intel_load_detect_pipe load_detect_temp; 15130 struct intel_load_detect_pipe load_detect_temp;
15126 struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx; 15131 struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
15127 15132
15128 /* We can't just switch on the pipe A, we need to set things up with a 15133 /* We can't just switch on the pipe A, we need to set things up with a
15129 * proper mode and output configuration. As a gross hack, enable pipe A 15134 * proper mode and output configuration. As a gross hack, enable pipe A
15130 * by enabling the load detect pipe once. */ 15135 * by enabling the load detect pipe once. */
15131 for_each_intel_connector(dev, connector) { 15136 for_each_intel_connector(dev, connector) {
15132 if (connector->encoder->type == INTEL_OUTPUT_ANALOG) { 15137 if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
15133 crt = &connector->base; 15138 crt = &connector->base;
15134 break; 15139 break;
15135 } 15140 }
15136 } 15141 }
15137 15142
15138 if (!crt) 15143 if (!crt)
15139 return; 15144 return;
15140 15145
15141 if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx)) 15146 if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
15142 intel_release_load_detect_pipe(crt, &load_detect_temp, ctx); 15147 intel_release_load_detect_pipe(crt, &load_detect_temp, ctx);
15143} 15148}
15144 15149
15145static bool 15150static bool
15146intel_check_plane_mapping(struct intel_crtc *crtc) 15151intel_check_plane_mapping(struct intel_crtc *crtc)
15147{ 15152{
15148 struct drm_device *dev = crtc->base.dev; 15153 struct drm_device *dev = crtc->base.dev;
15149 struct drm_i915_private *dev_priv = dev->dev_private; 15154 struct drm_i915_private *dev_priv = dev->dev_private;
15150 u32 val; 15155 u32 val;
15151 15156
15152 if (INTEL_INFO(dev)->num_pipes == 1) 15157 if (INTEL_INFO(dev)->num_pipes == 1)
15153 return true; 15158 return true;
15154 15159
15155 val = I915_READ(DSPCNTR(!crtc->plane)); 15160 val = I915_READ(DSPCNTR(!crtc->plane));
15156 15161
15157 if ((val & DISPLAY_PLANE_ENABLE) && 15162 if ((val & DISPLAY_PLANE_ENABLE) &&
15158 (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe)) 15163 (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
15159 return false; 15164 return false;
15160 15165
15161 return true; 15166 return true;
15162} 15167}
15163 15168
15164static bool intel_crtc_has_encoders(struct intel_crtc *crtc) 15169static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
15165{ 15170{
15166 struct drm_device *dev = crtc->base.dev; 15171 struct drm_device *dev = crtc->base.dev;
15167 struct intel_encoder *encoder; 15172 struct intel_encoder *encoder;
15168 15173
15169 for_each_encoder_on_crtc(dev, &crtc->base, encoder) 15174 for_each_encoder_on_crtc(dev, &crtc->base, encoder)
15170 return true; 15175 return true;
15171 15176
15172 return false; 15177 return false;
15173} 15178}
15174 15179
15175static void intel_sanitize_crtc(struct intel_crtc *crtc) 15180static void intel_sanitize_crtc(struct intel_crtc *crtc)
15176{ 15181{
15177 struct drm_device *dev = crtc->base.dev; 15182 struct drm_device *dev = crtc->base.dev;
15178 struct drm_i915_private *dev_priv = dev->dev_private; 15183 struct drm_i915_private *dev_priv = dev->dev_private;
15179 u32 reg; 15184 u32 reg;
15180 15185
15181 /* Clear any frame start delays used for debugging left by the BIOS */ 15186 /* Clear any frame start delays used for debugging left by the BIOS */
15182 reg = PIPECONF(crtc->config->cpu_transcoder); 15187 reg = PIPECONF(crtc->config->cpu_transcoder);
15183 I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); 15188 I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
15184 15189
15185 /* restore vblank interrupts to correct state */ 15190 /* restore vblank interrupts to correct state */
15186 drm_crtc_vblank_reset(&crtc->base); 15191 drm_crtc_vblank_reset(&crtc->base);
15187 if (crtc->active) { 15192 if (crtc->active) {
15188 struct intel_plane *plane; 15193 struct intel_plane *plane;
15189 15194
15190 drm_crtc_vblank_on(&crtc->base); 15195 drm_crtc_vblank_on(&crtc->base);
15191 15196
15192 /* Disable everything but the primary plane */ 15197 /* Disable everything but the primary plane */
15193 for_each_intel_plane_on_crtc(dev, crtc, plane) { 15198 for_each_intel_plane_on_crtc(dev, crtc, plane) {
15194 if (plane->base.type == DRM_PLANE_TYPE_PRIMARY) 15199 if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
15195 continue; 15200 continue;
15196 15201
15197 plane->disable_plane(&plane->base, &crtc->base); 15202 plane->disable_plane(&plane->base, &crtc->base);
15198 } 15203 }
15199 } 15204 }
15200 15205
15201 /* We need to sanitize the plane -> pipe mapping first because this will 15206 /* We need to sanitize the plane -> pipe mapping first because this will
15202 * disable the crtc (and hence change the state) if it is wrong. Note 15207 * disable the crtc (and hence change the state) if it is wrong. Note
15203 * that gen4+ has a fixed plane -> pipe mapping. */ 15208 * that gen4+ has a fixed plane -> pipe mapping. */
15204 if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) { 15209 if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
15205 bool plane; 15210 bool plane;
15206 15211
15207 DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n", 15212 DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
15208 crtc->base.base.id); 15213 crtc->base.base.id);
15209 15214
15210 /* Pipe has the wrong plane attached and the plane is active. 15215 /* Pipe has the wrong plane attached and the plane is active.
15211 * Temporarily change the plane mapping and disable everything 15216 * Temporarily change the plane mapping and disable everything
15212 * ... */ 15217 * ... */
15213 plane = crtc->plane; 15218 plane = crtc->plane;
15214 to_intel_plane_state(crtc->base.primary->state)->visible = true; 15219 to_intel_plane_state(crtc->base.primary->state)->visible = true;
15215 crtc->plane = !plane; 15220 crtc->plane = !plane;
15216 intel_crtc_disable_noatomic(&crtc->base); 15221 intel_crtc_disable_noatomic(&crtc->base);
15217 crtc->plane = plane; 15222 crtc->plane = plane;
15218 } 15223 }
15219 15224
15220 if (dev_priv->quirks & QUIRK_PIPEA_FORCE && 15225 if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
15221 crtc->pipe == PIPE_A && !crtc->active) { 15226 crtc->pipe == PIPE_A && !crtc->active) {
15222 /* BIOS forgot to enable pipe A, this mostly happens after 15227 /* BIOS forgot to enable pipe A, this mostly happens after
15223 * resume. Force-enable the pipe to fix this; in the update_dpms 15228 * resume. Force-enable the pipe to fix this; in the update_dpms
15224 * call below we restore the pipe to the right state, but leave 15229 * call below we restore the pipe to the right state, but leave
15225 * the required bits on. */ 15230 * the required bits on. */
15226 intel_enable_pipe_a(dev); 15231 intel_enable_pipe_a(dev);
15227 } 15232 }
15228 15233
15229 /* Adjust the state of the output pipe according to whether we 15234 /* Adjust the state of the output pipe according to whether we
15230 * have active connectors/encoders. */ 15235 * have active connectors/encoders. */
15231 if (!intel_crtc_has_encoders(crtc)) 15236 if (!intel_crtc_has_encoders(crtc))
15232 intel_crtc_disable_noatomic(&crtc->base); 15237 intel_crtc_disable_noatomic(&crtc->base);
15233 15238
15234 if (crtc->active != crtc->base.state->active) { 15239 if (crtc->active != crtc->base.state->active) {
15235 struct intel_encoder *encoder; 15240 struct intel_encoder *encoder;
15236 15241
15237 /* This can happen either due to bugs in the get_hw_state 15242 /* This can happen either due to bugs in the get_hw_state
15238 * functions or because of calls to intel_crtc_disable_noatomic, 15243 * functions or because of calls to intel_crtc_disable_noatomic,
15239 * or because the pipe is force-enabled due to the 15244 * or because the pipe is force-enabled due to the
15240 * pipe A quirk. */ 15245 * pipe A quirk. */
15241 DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n", 15246 DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
15242 crtc->base.base.id, 15247 crtc->base.base.id,
15243 crtc->base.state->enable ? "enabled" : "disabled", 15248 crtc->base.state->enable ? "enabled" : "disabled",
15244 crtc->active ? "enabled" : "disabled"); 15249 crtc->active ? "enabled" : "disabled");
15245 15250
15246 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, NULL) < 0); 15251 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, NULL) < 0);
15247 crtc->base.state->active = crtc->active; 15252 crtc->base.state->active = crtc->active;
15248 crtc->base.enabled = crtc->active; 15253 crtc->base.enabled = crtc->active;
15249 15254
15250 /* Because we only establish the connector -> encoder -> 15255 /* Because we only establish the connector -> encoder ->
15251 * crtc links if something is active, this means the 15256 * crtc links if something is active, this means the
15252 * crtc is now deactivated. Break the links. connector 15257 * crtc is now deactivated. Break the links. connector
15253 * -> encoder links are only established when things are 15258 * -> encoder links are only established when things are
15254 * actually up, hence no need to break them. */ 15259 * actually up, hence no need to break them. */
15255 WARN_ON(crtc->active); 15260 WARN_ON(crtc->active);
15256 15261
15257 for_each_encoder_on_crtc(dev, &crtc->base, encoder) 15262 for_each_encoder_on_crtc(dev, &crtc->base, encoder)
15258 encoder->base.crtc = NULL; 15263 encoder->base.crtc = NULL;
15259 } 15264 }
15260 15265
15261 if (crtc->active || HAS_GMCH_DISPLAY(dev)) { 15266 if (crtc->active || HAS_GMCH_DISPLAY(dev)) {
15262 /* 15267 /*
15263 * We start out with underrun reporting disabled to avoid races. 15268 * We start out with underrun reporting disabled to avoid races.
15264 * For correct bookkeeping mark this on active crtcs. 15269 * For correct bookkeeping mark this on active crtcs.
15265 * 15270 *
15266 * Also on gmch platforms we don't have any hardware bits to 15271 * Also on gmch platforms we don't have any hardware bits to
15267 * disable the underrun reporting. Which means we need to start 15272 * disable the underrun reporting. Which means we need to start
15268 * out with underrun reporting disabled also on inactive pipes, 15273 * out with underrun reporting disabled also on inactive pipes,
15269 * since otherwise we'll complain about the garbage we read when 15274 * since otherwise we'll complain about the garbage we read when
15270 * e.g. coming up after runtime pm. 15275 * e.g. coming up after runtime pm.
15271 * 15276 *
15272 * No protection against concurrent access is required - at 15277 * No protection against concurrent access is required - at
15273 * worst a fifo underrun happens which also sets this to false. 15278 * worst a fifo underrun happens which also sets this to false.
15274 */ 15279 */
15275 crtc->cpu_fifo_underrun_disabled = true; 15280 crtc->cpu_fifo_underrun_disabled = true;
15276 crtc->pch_fifo_underrun_disabled = true; 15281 crtc->pch_fifo_underrun_disabled = true;
15277 } 15282 }
15278} 15283}
15279 15284
15280static void intel_sanitize_encoder(struct intel_encoder *encoder) 15285static void intel_sanitize_encoder(struct intel_encoder *encoder)
15281{ 15286{
15282 struct intel_connector *connector; 15287 struct intel_connector *connector;
15283 struct drm_device *dev = encoder->base.dev; 15288 struct drm_device *dev = encoder->base.dev;
15284 bool active = false; 15289 bool active = false;
15285 15290
15286 /* We need to check both for a crtc link (meaning that the 15291 /* We need to check both for a crtc link (meaning that the
15287 * encoder is active and trying to read from a pipe) and the 15292 * encoder is active and trying to read from a pipe) and the
15288 * pipe itself being active. */ 15293 * pipe itself being active. */
15289 bool has_active_crtc = encoder->base.crtc && 15294 bool has_active_crtc = encoder->base.crtc &&
15290 to_intel_crtc(encoder->base.crtc)->active; 15295 to_intel_crtc(encoder->base.crtc)->active;
15291 15296
15292 for_each_intel_connector(dev, connector) { 15297 for_each_intel_connector(dev, connector) {
15293 if (connector->base.encoder != &encoder->base) 15298 if (connector->base.encoder != &encoder->base)
15294 continue; 15299 continue;
15295 15300
15296 active = true; 15301 active = true;
15297 break; 15302 break;
15298 } 15303 }
15299 15304
15300 if (active && !has_active_crtc) { 15305 if (active && !has_active_crtc) {
15301 DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n", 15306 DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
15302 encoder->base.base.id, 15307 encoder->base.base.id,
15303 encoder->base.name); 15308 encoder->base.name);
15304 15309
15305 /* Connector is active, but has no active pipe. This is 15310 /* Connector is active, but has no active pipe. This is
15306 * fallout from our resume register restoring. Disable 15311 * fallout from our resume register restoring. Disable
15307 * the encoder manually again. */ 15312 * the encoder manually again. */
15308 if (encoder->base.crtc) { 15313 if (encoder->base.crtc) {
15309 DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n", 15314 DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
15310 encoder->base.base.id, 15315 encoder->base.base.id,
15311 encoder->base.name); 15316 encoder->base.name);
15312 encoder->disable(encoder); 15317 encoder->disable(encoder);
15313 if (encoder->post_disable) 15318 if (encoder->post_disable)
15314 encoder->post_disable(encoder); 15319 encoder->post_disable(encoder);
15315 } 15320 }
15316 encoder->base.crtc = NULL; 15321 encoder->base.crtc = NULL;
15317 15322
15318 /* Inconsistent output/port/pipe state happens presumably due to 15323 /* Inconsistent output/port/pipe state happens presumably due to
15319 * a bug in one of the get_hw_state functions. Or someplace else 15324 * a bug in one of the get_hw_state functions. Or someplace else
15320 * in our code, like the register restore mess on resume. Clamp 15325 * in our code, like the register restore mess on resume. Clamp
15321 * things to off as a safer default. */ 15326 * things to off as a safer default. */
15322 for_each_intel_connector(dev, connector) { 15327 for_each_intel_connector(dev, connector) {
15323 if (connector->encoder != encoder) 15328 if (connector->encoder != encoder)
15324 continue; 15329 continue;
15325 connector->base.dpms = DRM_MODE_DPMS_OFF; 15330 connector->base.dpms = DRM_MODE_DPMS_OFF;
15326 connector->base.encoder = NULL; 15331 connector->base.encoder = NULL;
15327 } 15332 }
15328 } 15333 }
15329 /* Enabled encoders without active connectors will be fixed in 15334 /* Enabled encoders without active connectors will be fixed in
15330 * the crtc fixup. */ 15335 * the crtc fixup. */
15331} 15336}
15332 15337
15333void i915_redisable_vga_power_on(struct drm_device *dev) 15338void i915_redisable_vga_power_on(struct drm_device *dev)
15334{ 15339{
15335 struct drm_i915_private *dev_priv = dev->dev_private; 15340 struct drm_i915_private *dev_priv = dev->dev_private;
15336 u32 vga_reg = i915_vgacntrl_reg(dev); 15341 u32 vga_reg = i915_vgacntrl_reg(dev);
15337 15342
15338 if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) { 15343 if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
15339 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n"); 15344 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
15340 i915_disable_vga(dev); 15345 i915_disable_vga(dev);
15341 } 15346 }
15342} 15347}
15343 15348
15344void i915_redisable_vga(struct drm_device *dev) 15349void i915_redisable_vga(struct drm_device *dev)
15345{ 15350{
15346 struct drm_i915_private *dev_priv = dev->dev_private; 15351 struct drm_i915_private *dev_priv = dev->dev_private;
15347 15352
15348 /* This function can be called either from intel_modeset_setup_hw_state or 15353 /* This function can be called either from intel_modeset_setup_hw_state or
15349 * at a very early point in our resume sequence, where the power well 15354 * at a very early point in our resume sequence, where the power well
15350 * structures are not yet restored. Since this function is at a very 15355 * structures are not yet restored. Since this function is at a very
15351 * paranoid "someone might have enabled VGA while we were not looking" 15356 * paranoid "someone might have enabled VGA while we were not looking"
15352 * level, just check if the power well is enabled instead of trying to 15357 * level, just check if the power well is enabled instead of trying to
15353 * follow the "don't touch the power well if we don't need it" policy 15358 * follow the "don't touch the power well if we don't need it" policy
15354 * the rest of the driver uses. */ 15359 * the rest of the driver uses. */
15355 if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_VGA)) 15360 if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_VGA))
15356 return; 15361 return;
15357 15362
15358 i915_redisable_vga_power_on(dev); 15363 i915_redisable_vga_power_on(dev);
15359} 15364}
15360 15365
15361static bool primary_get_hw_state(struct intel_plane *plane) 15366static bool primary_get_hw_state(struct intel_plane *plane)
15362{ 15367{
15363 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 15368 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
15364 15369
15365 return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE; 15370 return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE;
15366} 15371}
15367 15372
15368/* FIXME read out full plane state for all planes */ 15373/* FIXME read out full plane state for all planes */
15369static void readout_plane_state(struct intel_crtc *crtc) 15374static void readout_plane_state(struct intel_crtc *crtc)
15370{ 15375{
15371 struct drm_plane *primary = crtc->base.primary; 15376 struct drm_plane *primary = crtc->base.primary;
15372 struct intel_plane_state *plane_state = 15377 struct intel_plane_state *plane_state =
15373 to_intel_plane_state(primary->state); 15378 to_intel_plane_state(primary->state);
15374 15379
15375 plane_state->visible = 15380 plane_state->visible =
15376 primary_get_hw_state(to_intel_plane(primary)); 15381 primary_get_hw_state(to_intel_plane(primary));
15377 15382
15378 if (plane_state->visible) 15383 if (plane_state->visible)
15379 crtc->base.state->plane_mask |= 1 << drm_plane_index(primary); 15384 crtc->base.state->plane_mask |= 1 << drm_plane_index(primary);
15380} 15385}
15381 15386
15382static void intel_modeset_readout_hw_state(struct drm_device *dev) 15387static void intel_modeset_readout_hw_state(struct drm_device *dev)
15383{ 15388{
15384 struct drm_i915_private *dev_priv = dev->dev_private; 15389 struct drm_i915_private *dev_priv = dev->dev_private;
15385 enum i915_pipe pipe; 15390 enum i915_pipe pipe;
15386 struct intel_crtc *crtc; 15391 struct intel_crtc *crtc;
15387 struct intel_encoder *encoder; 15392 struct intel_encoder *encoder;
15388 struct intel_connector *connector; 15393 struct intel_connector *connector;
15389 int i; 15394 int i;
15390 15395
15391 for_each_intel_crtc(dev, crtc) { 15396 for_each_intel_crtc(dev, crtc) {
15392 __drm_atomic_helper_crtc_destroy_state(&crtc->base, crtc->base.state); 15397 __drm_atomic_helper_crtc_destroy_state(&crtc->base, crtc->base.state);
15393 memset(crtc->config, 0, sizeof(*crtc->config)); 15398 memset(crtc->config, 0, sizeof(*crtc->config));
15394 crtc->config->base.crtc = &crtc->base; 15399 crtc->config->base.crtc = &crtc->base;
15395 15400
15396 crtc->active = dev_priv->display.get_pipe_config(crtc, 15401 crtc->active = dev_priv->display.get_pipe_config(crtc,
15397 crtc->config); 15402 crtc->config);
15398 15403
15399 crtc->base.state->active = crtc->active; 15404 crtc->base.state->active = crtc->active;
15400 crtc->base.enabled = crtc->active; 15405 crtc->base.enabled = crtc->active;
15401 15406
15402 readout_plane_state(crtc); 15407 readout_plane_state(crtc);
15403 15408
15404 DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n", 15409 DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
15405 crtc->base.base.id, 15410 crtc->base.base.id,
15406 crtc->active ? "enabled" : "disabled"); 15411 crtc->active ? "enabled" : "disabled");
15407 } 15412 }
15408 15413
15409 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 15414 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
15410 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; 15415 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
15411 15416
15412 pll->on = pll->get_hw_state(dev_priv, pll, 15417 pll->on = pll->get_hw_state(dev_priv, pll,
15413 &pll->config.hw_state); 15418 &pll->config.hw_state);
15414 pll->active = 0; 15419 pll->active = 0;
15415 pll->config.crtc_mask = 0; 15420 pll->config.crtc_mask = 0;
15416 for_each_intel_crtc(dev, crtc) { 15421 for_each_intel_crtc(dev, crtc) {
15417 if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll) { 15422 if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll) {
15418 pll->active++; 15423 pll->active++;
15419 pll->config.crtc_mask |= 1 << crtc->pipe; 15424 pll->config.crtc_mask |= 1 << crtc->pipe;
15420 } 15425 }
15421 } 15426 }
15422 15427
15423 DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n", 15428 DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
15424 pll->name, pll->config.crtc_mask, pll->on); 15429 pll->name, pll->config.crtc_mask, pll->on);
15425 15430
15426 if (pll->config.crtc_mask) 15431 if (pll->config.crtc_mask)
15427 intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS); 15432 intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
15428 } 15433 }
15429 15434
15430 for_each_intel_encoder(dev, encoder) { 15435 for_each_intel_encoder(dev, encoder) {
15431 pipe = 0; 15436 pipe = 0;
15432 15437
15433 if (encoder->get_hw_state(encoder, &pipe)) { 15438 if (encoder->get_hw_state(encoder, &pipe)) {
15434 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 15439 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
15435 encoder->base.crtc = &crtc->base; 15440 encoder->base.crtc = &crtc->base;
15436 encoder->get_config(encoder, crtc->config); 15441 encoder->get_config(encoder, crtc->config);
15437 } else { 15442 } else {
15438 encoder->base.crtc = NULL; 15443 encoder->base.crtc = NULL;
15439 } 15444 }
15440 15445
15441 DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n", 15446 DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
15442 encoder->base.base.id, 15447 encoder->base.base.id,
15443 encoder->base.name, 15448 encoder->base.name,
15444 encoder->base.crtc ? "enabled" : "disabled", 15449 encoder->base.crtc ? "enabled" : "disabled",
15445 pipe_name(pipe)); 15450 pipe_name(pipe));
15446 } 15451 }
15447 15452
15448 for_each_intel_connector(dev, connector) { 15453 for_each_intel_connector(dev, connector) {
15449 if (connector->get_hw_state(connector)) { 15454 if (connector->get_hw_state(connector)) {
15450 connector->base.dpms = DRM_MODE_DPMS_ON; 15455 connector->base.dpms = DRM_MODE_DPMS_ON;
15451 connector->base.encoder = &connector->encoder->base; 15456 connector->base.encoder = &connector->encoder->base;
15452 } else { 15457 } else {
15453 connector->base.dpms = DRM_MODE_DPMS_OFF; 15458 connector->base.dpms = DRM_MODE_DPMS_OFF;
15454 connector->base.encoder = NULL; 15459 connector->base.encoder = NULL;
15455 } 15460 }
15456 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n", 15461 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
15457 connector->base.base.id, 15462 connector->base.base.id,
15458 connector->base.name, 15463 connector->base.name,
15459 connector->base.encoder ? "enabled" : "disabled"); 15464 connector->base.encoder ? "enabled" : "disabled");
15460 } 15465 }
15461 15466
15462 for_each_intel_crtc(dev, crtc) { 15467 for_each_intel_crtc(dev, crtc) {
15463 crtc->base.hwmode = crtc->config->base.adjusted_mode; 15468 crtc->base.hwmode = crtc->config->base.adjusted_mode;
15464 15469
15465 memset(&crtc->base.mode, 0, sizeof(crtc->base.mode)); 15470 memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
15466 if (crtc->base.state->active) { 15471 if (crtc->base.state->active) {
15467 intel_mode_from_pipe_config(&crtc->base.mode, crtc->config); 15472 intel_mode_from_pipe_config(&crtc->base.mode, crtc->config);
15468 intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config); 15473 intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config);
15469 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode)); 15474 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
15470 15475
15471 /* 15476 /*
15472 * The initial mode needs to be set in order to keep 15477 * The initial mode needs to be set in order to keep
15473 * the atomic core happy. It wants a valid mode if the 15478 * the atomic core happy. It wants a valid mode if the
15474 * crtc's enabled, so we do the above call. 15479 * crtc's enabled, so we do the above call.
15475 * 15480 *
15476 * At this point some state updated by the connectors 15481 * At this point some state updated by the connectors
15477 * in their ->detect() callback has not run yet, so 15482 * in their ->detect() callback has not run yet, so
15478 * no recalculation can be done yet. 15483 * no recalculation can be done yet.
15479 * 15484 *
15480 * Even if we could do a recalculation and modeset 15485 * Even if we could do a recalculation and modeset
15481 * right now it would cause a double modeset if 15486 * right now it would cause a double modeset if
15482 * fbdev or userspace chooses a different initial mode. 15487 * fbdev or userspace chooses a different initial mode.
15483 * 15488 *
15484 * If that happens, someone indicated they wanted a 15489 * If that happens, someone indicated they wanted a
15485 * mode change, which means it's safe to do a full 15490 * mode change, which means it's safe to do a full
15486 * recalculation. 15491 * recalculation.
15487 */ 15492 */
15488 crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED; 15493 crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;
15489 15494
15490 drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode); 15495 drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
15491 update_scanline_offset(crtc); 15496 update_scanline_offset(crtc);
15492 } 15497 }
15493 } 15498 }
15494} 15499}
15495 15500
15496/* Scan out the current hw modeset state, 15501/* Scan out the current hw modeset state,
15497 * and sanitize it to the current state 15502 * and sanitize it to the current state
15498 */ 15503 */
15499static void 15504static void
15500intel_modeset_setup_hw_state(struct drm_device *dev) 15505intel_modeset_setup_hw_state(struct drm_device *dev)
15501{ 15506{
15502 struct drm_i915_private *dev_priv = dev->dev_private; 15507 struct drm_i915_private *dev_priv = dev->dev_private;
15503 enum i915_pipe pipe; 15508 enum i915_pipe pipe;
15504 struct intel_crtc *crtc; 15509 struct intel_crtc *crtc;
15505 struct intel_encoder *encoder; 15510 struct intel_encoder *encoder;
15506 int i; 15511 int i;
15507 15512
15508 intel_modeset_readout_hw_state(dev); 15513 intel_modeset_readout_hw_state(dev);
15509 15514
15510 /* HW state is read out, now we need to sanitize this mess. */ 15515 /* HW state is read out, now we need to sanitize this mess. */
15511 for_each_intel_encoder(dev, encoder) { 15516 for_each_intel_encoder(dev, encoder) {
15512 intel_sanitize_encoder(encoder); 15517 intel_sanitize_encoder(encoder);
15513 } 15518 }
15514 15519
15515 for_each_pipe(dev_priv, pipe) { 15520 for_each_pipe(dev_priv, pipe) {
15516 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 15521 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
15517 intel_sanitize_crtc(crtc); 15522 intel_sanitize_crtc(crtc);
15518 intel_dump_pipe_config(crtc, crtc->config, 15523 intel_dump_pipe_config(crtc, crtc->config,
15519 "[setup_hw_state]"); 15524 "[setup_hw_state]");
15520 } 15525 }
15521 15526
15522 intel_modeset_update_connector_atomic_state(dev); 15527 intel_modeset_update_connector_atomic_state(dev);
15523 15528
15524 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 15529 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
15525 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; 15530 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
15526 15531
15527 if (!pll->on || pll->active) 15532 if (!pll->on || pll->active)
15528 continue; 15533 continue;
15529 15534
15530 DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name); 15535 DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);
15531 15536
15532 pll->disable(dev_priv, pll); 15537 pll->disable(dev_priv, pll);
15533 pll->on = false; 15538 pll->on = false;
15534 } 15539 }
15535 15540
15536 if (IS_VALLEYVIEW(dev)) 15541 if (IS_VALLEYVIEW(dev))
15537 vlv_wm_get_hw_state(dev); 15542 vlv_wm_get_hw_state(dev);
15538 else if (IS_GEN9(dev)) 15543 else if (IS_GEN9(dev))
15539 skl_wm_get_hw_state(dev); 15544 skl_wm_get_hw_state(dev);
15540 else if (HAS_PCH_SPLIT(dev)) 15545 else if (HAS_PCH_SPLIT(dev))
15541 ilk_wm_get_hw_state(dev); 15546 ilk_wm_get_hw_state(dev);
15542 15547
15543 for_each_intel_crtc(dev, crtc) { 15548 for_each_intel_crtc(dev, crtc) {
15544 unsigned long put_domains; 15549 unsigned long put_domains;
15545 15550
15546 put_domains = modeset_get_crtc_power_domains(&crtc->base); 15551 put_domains = modeset_get_crtc_power_domains(&crtc->base);
15547 if (WARN_ON(put_domains)) 15552 if (WARN_ON(put_domains))
15548 modeset_put_power_domains(dev_priv, put_domains); 15553 modeset_put_power_domains(dev_priv, put_domains);
15549 } 15554 }
15550 intel_display_set_init_power(dev_priv, false); 15555 intel_display_set_init_power(dev_priv, false);
15551} 15556}
15552 15557
15553void intel_display_resume(struct drm_device *dev) 15558void intel_display_resume(struct drm_device *dev)
15554{ 15559{
15555 struct drm_atomic_state *state = drm_atomic_state_alloc(dev); 15560 struct drm_atomic_state *state = drm_atomic_state_alloc(dev);
15556 struct intel_connector *conn; 15561 struct intel_connector *conn;
15557 struct intel_plane *plane; 15562 struct intel_plane *plane;
15558 struct drm_crtc *crtc; 15563 struct drm_crtc *crtc;
15559 int ret; 15564 int ret;
15560 15565
15561 if (!state) 15566 if (!state)
15562 return; 15567 return;
15563 15568
15564 state->acquire_ctx = dev->mode_config.acquire_ctx; 15569 state->acquire_ctx = dev->mode_config.acquire_ctx;
15565 15570
15566 /* preserve complete old state, including dpll */ 15571 /* preserve complete old state, including dpll */
15567 intel_atomic_get_shared_dpll_state(state); 15572 intel_atomic_get_shared_dpll_state(state);
15568 15573
15569 for_each_crtc(dev, crtc) { 15574 for_each_crtc(dev, crtc) {
15570 struct drm_crtc_state *crtc_state = 15575 struct drm_crtc_state *crtc_state =
15571 drm_atomic_get_crtc_state(state, crtc); 15576 drm_atomic_get_crtc_state(state, crtc);
15572 15577
15573 ret = PTR_ERR_OR_ZERO(crtc_state); 15578 ret = PTR_ERR_OR_ZERO(crtc_state);
15574 if (ret) 15579 if (ret)
15575 goto err; 15580 goto err;
15576 15581
15577 /* force a restore */ 15582 /* force a restore */
15578 crtc_state->mode_changed = true; 15583 crtc_state->mode_changed = true;
15579 } 15584 }
15580 15585
15581 for_each_intel_plane(dev, plane) { 15586 for_each_intel_plane(dev, plane) {
15582 ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(state, &plane->base)); 15587 ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(state, &plane->base));
15583 if (ret) 15588 if (ret)
15584 goto err; 15589 goto err;
15585 } 15590 }
15586 15591
15587 for_each_intel_connector(dev, conn) { 15592 for_each_intel_connector(dev, conn) {
15588 ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(state, &conn->base)); 15593 ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(state, &conn->base));
15589 if (ret) 15594 if (ret)
15590 goto err; 15595 goto err;
15591 } 15596 }
15592 15597
15593 intel_modeset_setup_hw_state(dev); 15598 intel_modeset_setup_hw_state(dev);
15594 15599
15595 i915_redisable_vga(dev); 15600 i915_redisable_vga(dev);
15596 ret = drm_atomic_commit(state); 15601 ret = drm_atomic_commit(state);
15597 if (!ret) 15602 if (!ret)
15598 return; 15603 return;
15599 15604
15600err: 15605err:
15601 DRM_ERROR("Restoring old state failed with %i\n", ret); 15606 DRM_ERROR("Restoring old state failed with %i\n", ret);
15602 drm_atomic_state_free(state); 15607 drm_atomic_state_free(state);
15603} 15608}
15604 15609
15605void intel_modeset_gem_init(struct drm_device *dev) 15610void intel_modeset_gem_init(struct drm_device *dev)
15606{ 15611{
15607 struct drm_crtc *c; 15612 struct drm_crtc *c;
15608 struct drm_i915_gem_object *obj; 15613 struct drm_i915_gem_object *obj;
15609 int ret; 15614 int ret;
15610 15615
15611 mutex_lock(&dev->struct_mutex); 15616 mutex_lock(&dev->struct_mutex);
15612 intel_init_gt_powersave(dev); 15617 intel_init_gt_powersave(dev);
15613 mutex_unlock(&dev->struct_mutex); 15618 mutex_unlock(&dev->struct_mutex);
15614 15619
15615 intel_modeset_init_hw(dev); 15620 intel_modeset_init_hw(dev);
15616 15621
15617 intel_setup_overlay(dev); 15622 intel_setup_overlay(dev);
15618 15623
15619 /* 15624 /*
15620 * Make sure any fbs we allocated at startup are properly 15625 * Make sure any fbs we allocated at startup are properly
15621 * pinned & fenced. When we do the allocation it's too early 15626 * pinned & fenced. When we do the allocation it's too early
15622 * for this. 15627 * for this.
15623 */ 15628 */
15624 for_each_crtc(dev, c) { 15629 for_each_crtc(dev, c) {
15625 obj = intel_fb_obj(c->primary->fb); 15630 obj = intel_fb_obj(c->primary->fb);
15626 if (obj == NULL) 15631 if (obj == NULL)
15627 continue; 15632 continue;
15628 15633
15629 mutex_lock(&dev->struct_mutex); 15634 mutex_lock(&dev->struct_mutex);
15630 ret = intel_pin_and_fence_fb_obj(c->primary, 15635 ret = intel_pin_and_fence_fb_obj(c->primary,
15631 c->primary->fb, 15636 c->primary->fb,
15632 c->primary->state, 15637 c->primary->state,
15633 NULL, NULL); 15638 NULL, NULL);
15634 mutex_unlock(&dev->struct_mutex); 15639 mutex_unlock(&dev->struct_mutex);
15635 if (ret) { 15640 if (ret) {
15636 DRM_ERROR("failed to pin boot fb on pipe %d\n", 15641 DRM_ERROR("failed to pin boot fb on pipe %d\n",
15637 to_intel_crtc(c)->pipe); 15642 to_intel_crtc(c)->pipe);
15638 drm_framebuffer_unreference(c->primary->fb); 15643 drm_framebuffer_unreference(c->primary->fb);
15639 c->primary->fb = NULL; 15644 c->primary->fb = NULL;
15640 c->primary->crtc = c->primary->state->crtc = NULL; 15645 c->primary->crtc = c->primary->state->crtc = NULL;
15641 update_state_fb(c->primary); 15646 update_state_fb(c->primary);
15642 c->state->plane_mask &= ~(1 << drm_plane_index(c->primary)); 15647 c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
15643 } 15648 }
15644 } 15649 }
15645 15650
15646 intel_backlight_register(dev); 15651 intel_backlight_register(dev);
15647} 15652}
15648 15653
15649void intel_connector_unregister(struct intel_connector *intel_connector) 15654void intel_connector_unregister(struct intel_connector *intel_connector)
15650{ 15655{
15651 struct drm_connector *connector = &intel_connector->base; 15656 struct drm_connector *connector = &intel_connector->base;
15652 15657
15653 intel_panel_destroy_backlight(connector); 15658 intel_panel_destroy_backlight(connector);
15654 drm_connector_unregister(connector); 15659 drm_connector_unregister(connector);
15655} 15660}
15656 15661
15657void intel_modeset_cleanup(struct drm_device *dev) 15662void intel_modeset_cleanup(struct drm_device *dev)
15658{ 15663{
15659 struct drm_i915_private *dev_priv = dev->dev_private; 15664 struct drm_i915_private *dev_priv = dev->dev_private;
15660 struct drm_connector *connector; 15665 struct drm_connector *connector;
15661 15666
15662 intel_disable_gt_powersave(dev); 15667 intel_disable_gt_powersave(dev);
15663 15668
15664 intel_backlight_unregister(dev); 15669 intel_backlight_unregister(dev);
15665 15670
15666 /* 15671 /*
15667 * Disable interrupts and polling first to avoid creating havoc. 15672 * Disable interrupts and polling first to avoid creating havoc.
15668 * Too much stuff here (turning off connectors, ...) would 15673 * Too much stuff here (turning off connectors, ...) would
15669 * experience fancy races otherwise. 15674 * experience fancy races otherwise.
15670 */ 15675 */
15671 intel_irq_uninstall(dev_priv); 15676 intel_irq_uninstall(dev_priv);
15672 15677
15673 /* 15678 /*
15674 * Due to the hpd irq storm handling the hotplug work can re-arm the 15679 * Due to the hpd irq storm handling the hotplug work can re-arm the
15675 * poll handlers. Hence disable polling after hpd handling is shut down. 15680 * poll handlers. Hence disable polling after hpd handling is shut down.
15676 */ 15681 */
15677 drm_kms_helper_poll_fini(dev); 15682 drm_kms_helper_poll_fini(dev);
15678 15683
15679 intel_unregister_dsm_handler(); 15684 intel_unregister_dsm_handler();
15680 15685
15681 intel_fbc_disable(dev_priv); 15686 intel_fbc_disable(dev_priv);
15682 15687
15683 /* flush any delayed tasks or pending work */ 15688 /* flush any delayed tasks or pending work */
15684 flush_scheduled_work(); 15689 flush_scheduled_work();
15685 15690
15686 /* destroy the backlight and sysfs files before encoders/connectors */ 15691 /* destroy the backlight and sysfs files before encoders/connectors */
15687 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 15692 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
15688 struct intel_connector *intel_connector; 15693 struct intel_connector *intel_connector;
15689 15694
15690 intel_connector = to_intel_connector(connector); 15695 intel_connector = to_intel_connector(connector);
15691 intel_connector->unregister(intel_connector); 15696 intel_connector->unregister(intel_connector);
15692 } 15697 }
15693 15698
15694 drm_mode_config_cleanup(dev); 15699 drm_mode_config_cleanup(dev);
15695 15700
15696 intel_cleanup_overlay(dev); 15701 intel_cleanup_overlay(dev);
15697 15702
15698 mutex_lock(&dev->struct_mutex); 15703 mutex_lock(&dev->struct_mutex);
15699 intel_cleanup_gt_powersave(dev); 15704 intel_cleanup_gt_powersave(dev);
15700 mutex_unlock(&dev->struct_mutex); 15705 mutex_unlock(&dev->struct_mutex);
15701 15706
15702 intel_teardown_gmbus(dev); 15707 intel_teardown_gmbus(dev);
15703} 15708}
15704 15709
15705/* 15710/*
15706 * Return which encoder is currently attached to the connector. 15711 * Return which encoder is currently attached to the connector.
15707 */ 15712 */
15708struct drm_encoder *intel_best_encoder(struct drm_connector *connector) 15713struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
15709{ 15714{
15710 return &intel_attached_encoder(connector)->base; 15715 return &intel_attached_encoder(connector)->base;
15711} 15716}
15712 15717
15713void intel_connector_attach_encoder(struct intel_connector *connector, 15718void intel_connector_attach_encoder(struct intel_connector *connector,
15714 struct intel_encoder *encoder) 15719 struct intel_encoder *encoder)
15715{ 15720{
15716 connector->encoder = encoder; 15721 connector->encoder = encoder;
15717 drm_mode_connector_attach_encoder(&connector->base, 15722 drm_mode_connector_attach_encoder(&connector->base,
15718 &encoder->base); 15723 &encoder->base);
15719} 15724}
15720 15725
15721/* 15726/*
15722 * set vga decode state - true == enable VGA decode 15727 * set vga decode state - true == enable VGA decode
15723 */ 15728 */
15724int intel_modeset_vga_set_state(struct drm_device *dev, bool state) 15729int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
15725{ 15730{
15726 struct drm_i915_private *dev_priv = dev->dev_private; 15731 struct drm_i915_private *dev_priv = dev->dev_private;
15727 unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL; 15732 unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
15728 u16 gmch_ctrl; 15733 u16 gmch_ctrl;
15729 15734
15730 if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) { 15735 if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
15731 DRM_ERROR("failed to read control word\n"); 15736 DRM_ERROR("failed to read control word\n");
15732 return -EIO; 15737 return -EIO;
15733 } 15738 }
15734 15739
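	/* (editor's note, not part of the original diff) Both sides of the
	 * comparison below are normalized to booleans: bail out early if the
	 * VGA-disable bit already matches the requested decode state. */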
15735 if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state) 15740 if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
15736 return 0; 15741 return 0;
15737 15742
15738 if (state) 15743 if (state)
15739 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE; 15744 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
15740 else 15745 else
15741 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE; 15746 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
15742 15747
15743 if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) { 15748 if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
15744 DRM_ERROR("failed to write control word\n"); 15749 DRM_ERROR("failed to write control word\n");
15745 return -EIO; 15750 return -EIO;
15746 } 15751 }
15747 15752
15748 return 0; 15753 return 0;
15749} 15754}
15750 15755
15751struct intel_display_error_state { 15756struct intel_display_error_state {
15752 15757
15753 u32 power_well_driver; 15758 u32 power_well_driver;
15754 15759
15755 int num_transcoders; 15760 int num_transcoders;
15756 15761
15757 struct intel_cursor_error_state { 15762 struct intel_cursor_error_state {
15758 u32 control; 15763 u32 control;
15759 u32 position; 15764 u32 position;
15760 u32 base; 15765 u32 base;
15761 u32 size; 15766 u32 size;
15762 } cursor[I915_MAX_PIPES]; 15767 } cursor[I915_MAX_PIPES];
15763 15768
15764 struct intel_pipe_error_state { 15769 struct intel_pipe_error_state {
15765 bool power_domain_on; 15770 bool power_domain_on;
15766 u32 source; 15771 u32 source;
15767 u32 stat; 15772 u32 stat;
15768 } pipe[I915_MAX_PIPES]; 15773 } pipe[I915_MAX_PIPES];
15769 15774
15770 struct intel_plane_error_state { 15775 struct intel_plane_error_state {
15771 u32 control; 15776 u32 control;
15772 u32 stride; 15777 u32 stride;
15773 u32 size; 15778 u32 size;
15774 u32 pos; 15779 u32 pos;
15775 u32 addr; 15780 u32 addr;
15776 u32 surface; 15781 u32 surface;
15777 u32 tile_offset; 15782 u32 tile_offset;
15778 } plane[I915_MAX_PIPES]; 15783 } plane[I915_MAX_PIPES];
15779 15784
15780 struct intel_transcoder_error_state { 15785 struct intel_transcoder_error_state {
15781 bool power_domain_on; 15786 bool power_domain_on;
15782 enum transcoder cpu_transcoder; 15787 enum transcoder cpu_transcoder;
15783 15788
15784 u32 conf; 15789 u32 conf;
15785 15790
15786 u32 htotal; 15791 u32 htotal;
15787 u32 hblank; 15792 u32 hblank;
15788 u32 hsync; 15793 u32 hsync;
15789 u32 vtotal; 15794 u32 vtotal;
15790 u32 vblank; 15795 u32 vblank;
15791 u32 vsync; 15796 u32 vsync;
15792 } transcoder[4]; 15797 } transcoder[4];
15793}; 15798};
15794 15799
15795struct intel_display_error_state * 15800struct intel_display_error_state *
15796intel_display_capture_error_state(struct drm_device *dev) 15801intel_display_capture_error_state(struct drm_device *dev)
15797{ 15802{
15798 struct drm_i915_private *dev_priv = dev->dev_private; 15803 struct drm_i915_private *dev_priv = dev->dev_private;
15799 struct intel_display_error_state *error; 15804 struct intel_display_error_state *error;
15800 int transcoders[] = { 15805 int transcoders[] = {
15801 TRANSCODER_A, 15806 TRANSCODER_A,
15802 TRANSCODER_B, 15807 TRANSCODER_B,
15803 TRANSCODER_C, 15808 TRANSCODER_C,
15804 TRANSCODER_EDP, 15809 TRANSCODER_EDP,
15805 }; 15810 };
15806 int i; 15811 int i;
15807 15812
15808 if (INTEL_INFO(dev)->num_pipes == 0) 15813 if (INTEL_INFO(dev)->num_pipes == 0)
15809 return NULL; 15814 return NULL;
15810 15815
15811 error = kzalloc(sizeof(*error), GFP_ATOMIC); 15816 error = kzalloc(sizeof(*error), GFP_ATOMIC);
15812 if (error == NULL) 15817 if (error == NULL)
15813 return NULL; 15818 return NULL;
15814 15819
15815 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 15820 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
15816 error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER); 15821 error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
15817 15822
15818 for_each_pipe(dev_priv, i) { 15823 for_each_pipe(dev_priv, i) {
15819 error->pipe[i].power_domain_on = 15824 error->pipe[i].power_domain_on =
15820 __intel_display_power_is_enabled(dev_priv, 15825 __intel_display_power_is_enabled(dev_priv,
15821 POWER_DOMAIN_PIPE(i)); 15826 POWER_DOMAIN_PIPE(i));
15822 if (!error->pipe[i].power_domain_on) 15827 if (!error->pipe[i].power_domain_on)
15823 continue; 15828 continue;
15824 15829
15825 error->cursor[i].control = I915_READ(CURCNTR(i)); 15830 error->cursor[i].control = I915_READ(CURCNTR(i));
15826 error->cursor[i].position = I915_READ(CURPOS(i)); 15831 error->cursor[i].position = I915_READ(CURPOS(i));
15827 error->cursor[i].base = I915_READ(CURBASE(i)); 15832 error->cursor[i].base = I915_READ(CURBASE(i));
15828 15833
15829 error->plane[i].control = I915_READ(DSPCNTR(i)); 15834 error->plane[i].control = I915_READ(DSPCNTR(i));
15830 error->plane[i].stride = I915_READ(DSPSTRIDE(i)); 15835 error->plane[i].stride = I915_READ(DSPSTRIDE(i));
15831 if (INTEL_INFO(dev)->gen <= 3) { 15836 if (INTEL_INFO(dev)->gen <= 3) {
15832 error->plane[i].size = I915_READ(DSPSIZE(i)); 15837 error->plane[i].size = I915_READ(DSPSIZE(i));
15833 error->plane[i].pos = I915_READ(DSPPOS(i)); 15838 error->plane[i].pos = I915_READ(DSPPOS(i));
15834 } 15839 }
15835 if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) 15840 if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
15836 error->plane[i].addr = I915_READ(DSPADDR(i)); 15841 error->plane[i].addr = I915_READ(DSPADDR(i));
15837 if (INTEL_INFO(dev)->gen >= 4) { 15842 if (INTEL_INFO(dev)->gen >= 4) {
15838 error->plane[i].surface = I915_READ(DSPSURF(i)); 15843 error->plane[i].surface = I915_READ(DSPSURF(i));
15839 error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i)); 15844 error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
15840 } 15845 }
15841 15846
15842 error->pipe[i].source = I915_READ(PIPESRC(i)); 15847 error->pipe[i].source = I915_READ(PIPESRC(i));
15843 15848
15844 if (HAS_GMCH_DISPLAY(dev)) 15849 if (HAS_GMCH_DISPLAY(dev))
15845 error->pipe[i].stat = I915_READ(PIPESTAT(i)); 15850 error->pipe[i].stat = I915_READ(PIPESTAT(i));
15846 } 15851 }
15847 15852
15848 error->num_transcoders = INTEL_INFO(dev)->num_pipes; 15853 error->num_transcoders = INTEL_INFO(dev)->num_pipes;
15849 if (HAS_DDI(dev_priv->dev)) 15854 if (HAS_DDI(dev_priv->dev))
15850 error->num_transcoders++; /* Account for eDP. */ 15855 error->num_transcoders++; /* Account for eDP. */
15851 15856
15852 for (i = 0; i < error->num_transcoders; i++) { 15857 for (i = 0; i < error->num_transcoders; i++) {
15853 enum transcoder cpu_transcoder = transcoders[i]; 15858 enum transcoder cpu_transcoder = transcoders[i];
15854 15859
15855 error->transcoder[i].power_domain_on = 15860 error->transcoder[i].power_domain_on =
15856 __intel_display_power_is_enabled(dev_priv, 15861 __intel_display_power_is_enabled(dev_priv,
15857 POWER_DOMAIN_TRANSCODER(cpu_transcoder)); 15862 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
15858 if (!error->transcoder[i].power_domain_on) 15863 if (!error->transcoder[i].power_domain_on)
15859 continue; 15864 continue;
15860 15865
15861 error->transcoder[i].cpu_transcoder = cpu_transcoder; 15866 error->transcoder[i].cpu_transcoder = cpu_transcoder;
15862 15867
15863 error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder)); 15868 error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
15864 error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder)); 15869 error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
15865 error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder)); 15870 error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
15866 error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder)); 15871 error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
15867 error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder)); 15872 error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
15868 error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder)); 15873 error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
15869 error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder)); 15874 error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
15870 } 15875 }
15871 15876
15872 return error; 15877 return error;
15873} 15878}
15874 15879
15875#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__) 15880#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
15876 15881
15877void 15882void
15878intel_display_print_error_state(struct drm_i915_error_state_buf *m, 15883intel_display_print_error_state(struct drm_i915_error_state_buf *m,
15879 struct drm_device *dev, 15884 struct drm_device *dev,
15880 struct intel_display_error_state *error) 15885 struct intel_display_error_state *error)
15881{ 15886{
15882 struct drm_i915_private *dev_priv = dev->dev_private; 15887 struct drm_i915_private *dev_priv = dev->dev_private;
15883 int i; 15888 int i;
15884 15889
15885 if (!error) 15890 if (!error)
15886 return; 15891 return;
15887 15892
15888 err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes); 15893 err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
15889 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 15894 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
15890 err_printf(m, "PWR_WELL_CTL2: %08x\n", 15895 err_printf(m, "PWR_WELL_CTL2: %08x\n",
15891 error->power_well_driver); 15896 error->power_well_driver);
15892 for_each_pipe(dev_priv, i) { 15897 for_each_pipe(dev_priv, i) {
15893 err_printf(m, "Pipe [%d]:\n", i); 15898 err_printf(m, "Pipe [%d]:\n", i);
15894 err_printf(m, " Power: %s\n", 15899 err_printf(m, " Power: %s\n",
15895 error->pipe[i].power_domain_on ? "on" : "off"); 15900 error->pipe[i].power_domain_on ? "on" : "off");
15896 err_printf(m, " SRC: %08x\n", error->pipe[i].source); 15901 err_printf(m, " SRC: %08x\n", error->pipe[i].source);
15897 err_printf(m, " STAT: %08x\n", error->pipe[i].stat); 15902 err_printf(m, " STAT: %08x\n", error->pipe[i].stat);
15898 15903
15899 err_printf(m, "Plane [%d]:\n", i); 15904 err_printf(m, "Plane [%d]:\n", i);
15900 err_printf(m, " CNTR: %08x\n", error->plane[i].control); 15905 err_printf(m, " CNTR: %08x\n", error->plane[i].control);
15901 err_printf(m, " STRIDE: %08x\n", error->plane[i].stride); 15906 err_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
15902 if (INTEL_INFO(dev)->gen <= 3) { 15907 if (INTEL_INFO(dev)->gen <= 3) {
15903 err_printf(m, " SIZE: %08x\n", error->plane[i].size); 15908 err_printf(m, " SIZE: %08x\n", error->plane[i].size);
15904 err_printf(m, " POS: %08x\n", error->plane[i].pos); 15909 err_printf(m, " POS: %08x\n", error->plane[i].pos);
15905 } 15910 }
15906 if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) 15911 if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
15907 err_printf(m, " ADDR: %08x\n", error->plane[i].addr); 15912 err_printf(m, " ADDR: %08x\n", error->plane[i].addr);
15908 if (INTEL_INFO(dev)->gen >= 4) { 15913 if (INTEL_INFO(dev)->gen >= 4) {
15909 err_printf(m, " SURF: %08x\n", error->plane[i].surface); 15914 err_printf(m, " SURF: %08x\n", error->plane[i].surface);
15910 err_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset); 15915 err_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
15911 } 15916 }
15912 15917
15913 err_printf(m, "Cursor [%d]:\n", i); 15918 err_printf(m, "Cursor [%d]:\n", i);
15914 err_printf(m, " CNTR: %08x\n", error->cursor[i].control); 15919 err_printf(m, " CNTR: %08x\n", error->cursor[i].control);
15915 err_printf(m, " POS: %08x\n", error->cursor[i].position); 15920 err_printf(m, " POS: %08x\n", error->cursor[i].position);
15916 err_printf(m, " BASE: %08x\n", error->cursor[i].base); 15921 err_printf(m, " BASE: %08x\n", error->cursor[i].base);
15917 } 15922 }
15918 15923
15919 for (i = 0; i < error->num_transcoders; i++) { 15924 for (i = 0; i < error->num_transcoders; i++) {
15920 err_printf(m, "CPU transcoder: %c\n", 15925 err_printf(m, "CPU transcoder: %c\n",
15921 transcoder_name(error->transcoder[i].cpu_transcoder)); 15926 transcoder_name(error->transcoder[i].cpu_transcoder));
15922 err_printf(m, " Power: %s\n", 15927 err_printf(m, " Power: %s\n",
15923 error->transcoder[i].power_domain_on ? "on" : "off"); 15928 error->transcoder[i].power_domain_on ? "on" : "off");
15924 err_printf(m, " CONF: %08x\n", error->transcoder[i].conf); 15929 err_printf(m, " CONF: %08x\n", error->transcoder[i].conf);
15925 err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal); 15930 err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal);
15926 err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank); 15931 err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank);
15927 err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync); 15932 err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync);
15928 err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal); 15933 err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal);
15929 err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank); 15934 err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank);
15930 err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync); 15935 err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync);
15931 } 15936 }
15932} 15937}
15933 15938
15934void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file) 15939void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file)
15935{ 15940{
15936 struct intel_crtc *crtc; 15941 struct intel_crtc *crtc;
15937 15942
15938 for_each_intel_crtc(dev, crtc) { 15943 for_each_intel_crtc(dev, crtc) {
15939 struct intel_unpin_work *work; 15944 struct intel_unpin_work *work;
15940 15945
15941 spin_lock_irq(&dev->event_lock); 15946 spin_lock_irq(&dev->event_lock);
15942 15947
15943 work = crtc->unpin_work; 15948 work = crtc->unpin_work;
15944 15949
15945 if (work && work->event && 15950 if (work && work->event &&
15946 work->event->base.file_priv == file) { 15951 work->event->base.file_priv == file) {
15947 kfree(work->event); 15952 kfree(work->event);
15948 work->event = NULL; 15953 work->event = NULL;
15949 } 15954 }
15950 15955
15951 spin_unlock_irq(&dev->event_lock); 15956 spin_unlock_irq(&dev->event_lock);
15952 } 15957 }
15953} 15958}

cvs diff -r1.11 -r1.12 src/sys/external/bsd/drm2/dist/drm/i915/Attic/intel_dp.c

--- src/sys/external/bsd/drm2/dist/drm/i915/Attic/intel_dp.c 2018/08/27 04:58:24 1.11
+++ src/sys/external/bsd/drm2/dist/drm/i915/Attic/intel_dp.c 2018/08/27 06:16:01 1.12
@@ -1,1609 +1,1613 @@ @@ -1,1609 +1,1613 @@
1/* $NetBSD: intel_dp.c,v 1.11 2018/08/27 04:58:24 riastradh Exp $ */ 1/* $NetBSD: intel_dp.c,v 1.12 2018/08/27 06:16:01 riastradh Exp $ */
2 2
3/* 3/*
4 * Copyright © 2008 Intel Corporation 4 * Copyright © 2008 Intel Corporation
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"), 7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation 8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the 10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions: 11 * Software is furnished to do so, subject to the following conditions:
12 * 12 *
13 * The above copyright notice and this permission notice (including the next 13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the 14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software. 15 * Software.
16 * 16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
23 * IN THE SOFTWARE. 23 * IN THE SOFTWARE.
24 * 24 *
25 * Authors: 25 * Authors:
26 * Keith Packard <keithp@keithp.com> 26 * Keith Packard <keithp@keithp.com>
27 * 27 *
28 */ 28 */
29 29
30#include <sys/cdefs.h> 30#include <sys/cdefs.h>
31__KERNEL_RCSID(0, "$NetBSD: intel_dp.c,v 1.11 2018/08/27 04:58:24 riastradh Exp $"); 31__KERNEL_RCSID(0, "$NetBSD: intel_dp.c,v 1.12 2018/08/27 06:16:01 riastradh Exp $");
32 32
33#include <linux/i2c.h> 33#include <linux/i2c.h>
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <linux/export.h> 35#include <linux/export.h>
36#include <linux/notifier.h> 36#include <linux/notifier.h>
37#include <linux/reboot.h> 37#include <linux/reboot.h>
38#include <linux/err.h> 38#include <linux/err.h>
39#include <linux/module.h> 39#include <linux/module.h>
40#include <linux/printk.h> 40#include <linux/printk.h>
41#include <linux/pm_qos.h> 41#include <linux/pm_qos.h>
42#include <drm/drmP.h> 42#include <drm/drmP.h>
43#include <drm/drm_atomic_helper.h> 43#include <drm/drm_atomic_helper.h>
44#include <drm/drm_crtc.h> 44#include <drm/drm_crtc.h>
45#include <drm/drm_crtc_helper.h> 45#include <drm/drm_crtc_helper.h>
46#include <drm/drm_edid.h> 46#include <drm/drm_edid.h>
47#include "intel_drv.h" 47#include "intel_drv.h"
48#include <drm/i915_drm.h> 48#include <drm/i915_drm.h>
49#include "i915_drv.h" 49#include "i915_drv.h"
50 50
51#define DP_LINK_CHECK_TIMEOUT (10 * 1000) 51#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
52 52
53/* Compliance test status bits */ 53/* Compliance test status bits */
54#define INTEL_DP_RESOLUTION_SHIFT_MASK 0 54#define INTEL_DP_RESOLUTION_SHIFT_MASK 0
55#define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK) 55#define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
56#define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK) 56#define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
57#define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK) 57#define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
58 58
59struct dp_link_dpll { 59struct dp_link_dpll {
60 int clock; 60 int clock;
61 struct dpll dpll; 61 struct dpll dpll;
62}; 62};
63 63
64static const struct dp_link_dpll gen4_dpll[] = { 64static const struct dp_link_dpll gen4_dpll[] = {
65 { 162000, 65 { 162000,
66 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } }, 66 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
67 { 270000, 67 { 270000,
68 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } } 68 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
69}; 69};
70 70
71static const struct dp_link_dpll pch_dpll[] = { 71static const struct dp_link_dpll pch_dpll[] = {
72 { 162000, 72 { 162000,
73 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } }, 73 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
74 { 270000, 74 { 270000,
75 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } } 75 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
76}; 76};
77 77
78static const struct dp_link_dpll vlv_dpll[] = { 78static const struct dp_link_dpll vlv_dpll[] = {
79 { 162000, 79 { 162000,
80 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } }, 80 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
81 { 270000, 81 { 270000,
82 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } } 82 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
83}; 83};
84 84
85/* 85/*
 86 * CHV supports eDP 1.4, which has more link rates. 86 * CHV supports eDP 1.4, which has more link rates.
 87 * Below we only provide the fixed rates and exclude the variable rates. 87 * Below we only provide the fixed rates and exclude the variable rates.
88 */ 88 */
89static const struct dp_link_dpll chv_dpll[] = { 89static const struct dp_link_dpll chv_dpll[] = {
90 /* 90 /*
 91 * CHV requires programming fractional division for m2. 91 * CHV requires programming fractional division for m2.
 92 * m2 is stored in fixed-point format using the formula below: 92 * m2 is stored in fixed-point format using the formula below:
93 * (m2_int << 22) | m2_fraction 93 * (m2_int << 22) | m2_fraction
94 */ 94 */
95 { 162000, /* m2_int = 32, m2_fraction = 1677722 */ 95 { 162000, /* m2_int = 32, m2_fraction = 1677722 */
96 { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } }, 96 { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
97 { 270000, /* m2_int = 27, m2_fraction = 0 */ 97 { 270000, /* m2_int = 27, m2_fraction = 0 */
98 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }, 98 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
99 { 540000, /* m2_int = 27, m2_fraction = 0 */ 99 { 540000, /* m2_int = 27, m2_fraction = 0 */
100 { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } } 100 { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
101}; 101};
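
Editor's aside, not part of the original diff: a minimal, self-contained C sketch that decodes the first chv_dpll m2 value to illustrate the "(m2_int << 22) | m2_fraction" fixed-point layout described in the comment above. Only that layout and the 0x819999a table entry are taken from this file; everything else is hypothetical illustration.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t m2 = 0x819999a;			/* chv_dpll 162000 entry */
	uint32_t m2_int = m2 >> 22;			/* integer part */
	uint32_t m2_fraction = m2 & ((1u << 22) - 1);	/* fractional part */

	/* Prints m2_int=32 m2_fraction=1677722, matching the table comments. */
	printf("m2_int=%u m2_fraction=%u\n", m2_int, m2_fraction);
	return 0;
}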
102 102
103static const int bxt_rates[] = { 162000, 216000, 243000, 270000, 103static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
104 324000, 432000, 540000 }; 104 324000, 432000, 540000 };
105static const int skl_rates[] = { 162000, 216000, 270000, 105static const int skl_rates[] = { 162000, 216000, 270000,
106 324000, 432000, 540000 }; 106 324000, 432000, 540000 };
107static const int default_rates[] = { 162000, 270000, 540000 }; 107static const int default_rates[] = { 162000, 270000, 540000 };
108 108
109/** 109/**
110 * is_edp - is the given port attached to an eDP panel (either CPU or PCH) 110 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
111 * @intel_dp: DP struct 111 * @intel_dp: DP struct
112 * 112 *
113 * If a CPU or PCH DP output is attached to an eDP panel, this function 113 * If a CPU or PCH DP output is attached to an eDP panel, this function
114 * will return true, and false otherwise. 114 * will return true, and false otherwise.
115 */ 115 */
116static bool is_edp(struct intel_dp *intel_dp) 116static bool is_edp(struct intel_dp *intel_dp)
117{ 117{
118 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 118 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
119 119
120 return intel_dig_port->base.type == INTEL_OUTPUT_EDP; 120 return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
121} 121}
122 122
123static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp) 123static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
124{ 124{
125 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 125 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
126 126
127 return intel_dig_port->base.base.dev; 127 return intel_dig_port->base.base.dev;
128} 128}
129 129
130static struct intel_dp *intel_attached_dp(struct drm_connector *connector) 130static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
131{ 131{
132 return enc_to_intel_dp(&intel_attached_encoder(connector)->base); 132 return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
133} 133}
134 134
135static void intel_dp_link_down(struct intel_dp *intel_dp); 135static void intel_dp_link_down(struct intel_dp *intel_dp);
136static bool edp_panel_vdd_on(struct intel_dp *intel_dp); 136static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
137static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync); 137static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
138static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp); 138static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
139static void vlv_steal_power_sequencer(struct drm_device *dev, 139static void vlv_steal_power_sequencer(struct drm_device *dev,
140 enum pipe pipe); 140 enum pipe pipe);
141 141
142static unsigned int intel_dp_unused_lane_mask(int lane_count) 142static unsigned int intel_dp_unused_lane_mask(int lane_count)
143{ 143{
144 return ~((1 << lane_count) - 1) & 0xf; 144 return ~((1 << lane_count) - 1) & 0xf;
145} 145}
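/* (editor's note, not part of the original diff) Worked example for the mask
 * above, assuming the usual four-lane port: lane_count == 1 -> 0xe,
 * lane_count == 2 -> 0xc, lane_count == 4 -> 0x0, i.e. a bit set for every
 * lane left unused. */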
146 146
147static int 147static int
148intel_dp_max_link_bw(struct intel_dp *intel_dp) 148intel_dp_max_link_bw(struct intel_dp *intel_dp)
149{ 149{
150 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE]; 150 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
151 151
152 switch (max_link_bw) { 152 switch (max_link_bw) {
153 case DP_LINK_BW_1_62: 153 case DP_LINK_BW_1_62:
154 case DP_LINK_BW_2_7: 154 case DP_LINK_BW_2_7:
155 case DP_LINK_BW_5_4: 155 case DP_LINK_BW_5_4:
156 break; 156 break;
157 default: 157 default:
158 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n", 158 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
159 max_link_bw); 159 max_link_bw);
160 max_link_bw = DP_LINK_BW_1_62; 160 max_link_bw = DP_LINK_BW_1_62;
161 break; 161 break;
162 } 162 }
163 return max_link_bw; 163 return max_link_bw;
164} 164}
165 165
166static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp) 166static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
167{ 167{
168 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 168 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
169 struct drm_device *dev = intel_dig_port->base.base.dev; 169 struct drm_device *dev = intel_dig_port->base.base.dev;
170 u8 source_max, sink_max; 170 u8 source_max, sink_max;
171 171
172 source_max = 4; 172 source_max = 4;
173 if (HAS_DDI(dev) && intel_dig_port->port == PORT_A && 173 if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
174 (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0) 174 (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
175 source_max = 2; 175 source_max = 2;
176 176
177 sink_max = drm_dp_max_lane_count(intel_dp->dpcd); 177 sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
178 178
179 return min(source_max, sink_max); 179 return min(source_max, sink_max);
180} 180}
181 181
182/* 182/*
183 * The units on the numbers in the next two are... bizarre. Examples will 183 * The units on the numbers in the next two are... bizarre. Examples will
184 * make it clearer; this one parallels an example in the eDP spec. 184 * make it clearer; this one parallels an example in the eDP spec.
185 * 185 *
186 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as: 186 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
187 * 187 *
188 * 270000 * 1 * 8 / 10 == 216000 188 * 270000 * 1 * 8 / 10 == 216000
189 * 189 *
190 * The actual data capacity of that configuration is 2.16Gbit/s, so the 190 * The actual data capacity of that configuration is 2.16Gbit/s, so the
191 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz - 191 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
192 * or equivalently, kilopixels per second - so for 1680x1050R it'd be 192 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
193 * 119000. At 18bpp that's 2142000 kilobits per second. 193 * 119000. At 18bpp that's 2142000 kilobits per second.
194 * 194 *
195 * Thus the strange-looking division by 10 in intel_dp_link_required, to 195 * Thus the strange-looking division by 10 in intel_dp_link_required, to
196 * get the result in decakilobits instead of kilobits. 196 * get the result in decakilobits instead of kilobits.
197 */ 197 */
198 198
199static int 199static int
200intel_dp_link_required(int pixel_clock, int bpp) 200intel_dp_link_required(int pixel_clock, int bpp)
201{ 201{
202 return (pixel_clock * bpp + 9) / 10; 202 return (pixel_clock * bpp + 9) / 10;
203} 203}
204 204
205static int 205static int
206intel_dp_max_data_rate(int max_link_clock, int max_lanes) 206intel_dp_max_data_rate(int max_link_clock, int max_lanes)
207{ 207{
208 return (max_link_clock * max_lanes * 8) / 10; 208 return (max_link_clock * max_lanes * 8) / 10;
209} 209}
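
Editor's aside, not part of the original diff: a small, self-contained C sketch working through the decakilobit bookkeeping explained in the comment above. The helper names link_required/max_data_rate are hypothetical stand-ins mirroring the two functions in this file, and the 148500 kHz / 18bpp / 2-lane numbers are an illustrative assumption (roughly 1920x1080@60 as intel_dp_mode_valid would evaluate it).

#include <stdio.h>

/* Mirrors intel_dp_link_required: kilohertz * bpp -> decakilobits/s. */
static int link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 9) / 10;
}

/* Mirrors intel_dp_max_data_rate: link clock * lanes, 8b/10b payload. */
static int max_data_rate(int max_link_clock, int max_lanes)
{
	return (max_link_clock * max_lanes * 8) / 10;
}

int main(void)
{
	/* ~148500 kHz pixel clock at 18bpp over two lanes of the 270000
	 * (2.7GHz) link rate. */
	int mode_rate = link_required(148500, 18);	/* 267300 */
	int max_rate = max_data_rate(270000, 2);	/* 432000 */

	printf("mode_rate=%d max_rate=%d fits=%d\n",
	    mode_rate, max_rate, mode_rate <= max_rate);
	return 0;
}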
210 210
211static enum drm_mode_status 211static enum drm_mode_status
212intel_dp_mode_valid(struct drm_connector *connector, 212intel_dp_mode_valid(struct drm_connector *connector,
213 struct drm_display_mode *mode) 213 struct drm_display_mode *mode)
214{ 214{
215 struct intel_dp *intel_dp = intel_attached_dp(connector); 215 struct intel_dp *intel_dp = intel_attached_dp(connector);
216 struct intel_connector *intel_connector = to_intel_connector(connector); 216 struct intel_connector *intel_connector = to_intel_connector(connector);
217 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; 217 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
218 int target_clock = mode->clock; 218 int target_clock = mode->clock;
219 int max_rate, mode_rate, max_lanes, max_link_clock; 219 int max_rate, mode_rate, max_lanes, max_link_clock;
220 220
221 if (is_edp(intel_dp) && fixed_mode) { 221 if (is_edp(intel_dp) && fixed_mode) {
222 if (mode->hdisplay > fixed_mode->hdisplay) 222 if (mode->hdisplay > fixed_mode->hdisplay)
223 return MODE_PANEL; 223 return MODE_PANEL;
224 224
225 if (mode->vdisplay > fixed_mode->vdisplay) 225 if (mode->vdisplay > fixed_mode->vdisplay)
226 return MODE_PANEL; 226 return MODE_PANEL;
227 227
228 target_clock = fixed_mode->clock; 228 target_clock = fixed_mode->clock;
229 } 229 }
230 230
231 max_link_clock = intel_dp_max_link_rate(intel_dp); 231 max_link_clock = intel_dp_max_link_rate(intel_dp);
232 max_lanes = intel_dp_max_lane_count(intel_dp); 232 max_lanes = intel_dp_max_lane_count(intel_dp);
233 233
234 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); 234 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
235 mode_rate = intel_dp_link_required(target_clock, 18); 235 mode_rate = intel_dp_link_required(target_clock, 18);
236 236
237 if (mode_rate > max_rate) 237 if (mode_rate > max_rate)
238 return MODE_CLOCK_HIGH; 238 return MODE_CLOCK_HIGH;
239 239
240 if (mode->clock < 10000) 240 if (mode->clock < 10000)
241 return MODE_CLOCK_LOW; 241 return MODE_CLOCK_LOW;
242 242
243 if (mode->flags & DRM_MODE_FLAG_DBLCLK) 243 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
244 return MODE_H_ILLEGAL; 244 return MODE_H_ILLEGAL;
245 245
246 return MODE_OK; 246 return MODE_OK;
247} 247}
248 248
249uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes) 249uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
250{ 250{
251 int i; 251 int i;
252 uint32_t v = 0; 252 uint32_t v = 0;
253 253
254 if (src_bytes > 4) 254 if (src_bytes > 4)
255 src_bytes = 4; 255 src_bytes = 4;
256 for (i = 0; i < src_bytes; i++) 256 for (i = 0; i < src_bytes; i++)
257 v |= ((uint32_t) src[i]) << ((3-i) * 8); 257 v |= ((uint32_t) src[i]) << ((3-i) * 8);
258 return v; 258 return v;
259} 259}
260 260
261static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes) 261static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
262{ 262{
263 int i; 263 int i;
264 if (dst_bytes > 4) 264 if (dst_bytes > 4)
265 dst_bytes = 4; 265 dst_bytes = 4;
266 for (i = 0; i < dst_bytes; i++) 266 for (i = 0; i < dst_bytes; i++)
267 dst[i] = src >> ((3-i) * 8); 267 dst[i] = src >> ((3-i) * 8);
268} 268}
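
A brief sketch of the byte order these two helpers implement (example values are assumed, not from this file): the AUX data registers hold up to four message bytes with the first byte in the most significant position, which is why both loops shift by (3 - i) * 8.

/*
 * Illustrative only:
 *
 *   uint8_t buf[] = { 0x12, 0x34, 0x56, 0x78 };
 *
 *   intel_dp_pack_aux(buf, 4) == 0x12345678
 *   intel_dp_pack_aux(buf, 2) == 0x12340000
 *
 * and intel_dp_unpack_aux(0x12345678, dst, 4) writes the same four
 * bytes back into dst in the same order.
 */
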
269 269
270static void 270static void
271intel_dp_init_panel_power_sequencer(struct drm_device *dev, 271intel_dp_init_panel_power_sequencer(struct drm_device *dev,
272 struct intel_dp *intel_dp); 272 struct intel_dp *intel_dp);
273static void 273static void
274intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, 274intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
275 struct intel_dp *intel_dp); 275 struct intel_dp *intel_dp);
276 276
277static void pps_lock(struct intel_dp *intel_dp) 277static void pps_lock(struct intel_dp *intel_dp)
278{ 278{
279 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 279 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
280 struct intel_encoder *encoder = &intel_dig_port->base; 280 struct intel_encoder *encoder = &intel_dig_port->base;
281 struct drm_device *dev = encoder->base.dev; 281 struct drm_device *dev = encoder->base.dev;
282 struct drm_i915_private *dev_priv = dev->dev_private; 282 struct drm_i915_private *dev_priv = dev->dev_private;
283 enum intel_display_power_domain power_domain; 283 enum intel_display_power_domain power_domain;
284 284
285 /* 285 /*
 286 * See vlv_power_sequencer_reset() for why we need 286 * See vlv_power_sequencer_reset() for why we need
287 * a power domain reference here. 287 * a power domain reference here.
288 */ 288 */
289 power_domain = intel_display_port_aux_power_domain(encoder); 289 power_domain = intel_display_port_aux_power_domain(encoder);
290 intel_display_power_get(dev_priv, power_domain); 290 intel_display_power_get(dev_priv, power_domain);
291 291
292 mutex_lock(&dev_priv->pps_mutex); 292 mutex_lock(&dev_priv->pps_mutex);
293} 293}
294 294
295static void pps_unlock(struct intel_dp *intel_dp) 295static void pps_unlock(struct intel_dp *intel_dp)
296{ 296{
297 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 297 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
298 struct intel_encoder *encoder = &intel_dig_port->base; 298 struct intel_encoder *encoder = &intel_dig_port->base;
299 struct drm_device *dev = encoder->base.dev; 299 struct drm_device *dev = encoder->base.dev;
300 struct drm_i915_private *dev_priv = dev->dev_private; 300 struct drm_i915_private *dev_priv = dev->dev_private;
301 enum intel_display_power_domain power_domain; 301 enum intel_display_power_domain power_domain;
302 302
303 mutex_unlock(&dev_priv->pps_mutex); 303 mutex_unlock(&dev_priv->pps_mutex);
304 304
305 power_domain = intel_display_port_aux_power_domain(encoder); 305 power_domain = intel_display_port_aux_power_domain(encoder);
306 intel_display_power_put(dev_priv, power_domain); 306 intel_display_power_put(dev_priv, power_domain);
307} 307}
308 308
309static void 309static void
310vlv_power_sequencer_kick(struct intel_dp *intel_dp) 310vlv_power_sequencer_kick(struct intel_dp *intel_dp)
311{ 311{
312 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 312 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
313 struct drm_device *dev = intel_dig_port->base.base.dev; 313 struct drm_device *dev = intel_dig_port->base.base.dev;
314 struct drm_i915_private *dev_priv = dev->dev_private; 314 struct drm_i915_private *dev_priv = dev->dev_private;
315 enum pipe pipe = intel_dp->pps_pipe; 315 enum pipe pipe = intel_dp->pps_pipe;
316 bool pll_enabled, release_cl_override = false; 316 bool pll_enabled, release_cl_override = false;
317 enum dpio_phy phy = DPIO_PHY(pipe); 317 enum dpio_phy phy = DPIO_PHY(pipe);
318 enum dpio_channel ch = vlv_pipe_to_channel(pipe); 318 enum dpio_channel ch = vlv_pipe_to_channel(pipe);
319 uint32_t DP; 319 uint32_t DP;
320 320
321 if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN, 321 if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
322 "skipping pipe %c power seqeuncer kick due to port %c being active\n", 322 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
323 pipe_name(pipe), port_name(intel_dig_port->port))) 323 pipe_name(pipe), port_name(intel_dig_port->port)))
324 return; 324 return;
325 325
326 DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n", 326 DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
327 pipe_name(pipe), port_name(intel_dig_port->port)); 327 pipe_name(pipe), port_name(intel_dig_port->port));
328 328
329 /* Preserve the BIOS-computed detected bit. This is 329 /* Preserve the BIOS-computed detected bit. This is
330 * supposed to be read-only. 330 * supposed to be read-only.
331 */ 331 */
332 DP = I915_READ(intel_dp->output_reg) & DP_DETECTED; 332 DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
333 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; 333 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
334 DP |= DP_PORT_WIDTH(1); 334 DP |= DP_PORT_WIDTH(1);
335 DP |= DP_LINK_TRAIN_PAT_1; 335 DP |= DP_LINK_TRAIN_PAT_1;
336 336
337 if (IS_CHERRYVIEW(dev)) 337 if (IS_CHERRYVIEW(dev))
338 DP |= DP_PIPE_SELECT_CHV(pipe); 338 DP |= DP_PIPE_SELECT_CHV(pipe);
339 else if (pipe == PIPE_B) 339 else if (pipe == PIPE_B)
340 DP |= DP_PIPEB_SELECT; 340 DP |= DP_PIPEB_SELECT;
341 341
342 pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE; 342 pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
343 343
344 /* 344 /*
345 * The DPLL for the pipe must be enabled for this to work. 345 * The DPLL for the pipe must be enabled for this to work.
 346 * So enable it temporarily if it's not already enabled. 346 * So enable it temporarily if it's not already enabled.
347 */ 347 */
348 if (!pll_enabled) { 348 if (!pll_enabled) {
349 release_cl_override = IS_CHERRYVIEW(dev) && 349 release_cl_override = IS_CHERRYVIEW(dev) &&
350 !chv_phy_powergate_ch(dev_priv, phy, ch, true); 350 !chv_phy_powergate_ch(dev_priv, phy, ch, true);
351 351
352 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ? 352 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
353 &chv_dpll[0].dpll : &vlv_dpll[0].dpll); 353 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
354 } 354 }
355 355
356 /* 356 /*
357 * Similar magic as in intel_dp_enable_port(). 357 * Similar magic as in intel_dp_enable_port().
358 * We _must_ do this port enable + disable trick 358 * We _must_ do this port enable + disable trick
 359 * to make this power sequencer lock onto the port. 359 * to make this power sequencer lock onto the port.
 360 * Otherwise even the VDD force bit won't work. 360 * Otherwise even the VDD force bit won't work.
361 */ 361 */
362 I915_WRITE(intel_dp->output_reg, DP); 362 I915_WRITE(intel_dp->output_reg, DP);
363 POSTING_READ(intel_dp->output_reg); 363 POSTING_READ(intel_dp->output_reg);
364 364
365 I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN); 365 I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
366 POSTING_READ(intel_dp->output_reg); 366 POSTING_READ(intel_dp->output_reg);
367 367
368 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN); 368 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
369 POSTING_READ(intel_dp->output_reg); 369 POSTING_READ(intel_dp->output_reg);
370 370
371 if (!pll_enabled) { 371 if (!pll_enabled) {
372 vlv_force_pll_off(dev, pipe); 372 vlv_force_pll_off(dev, pipe);
373 373
374 if (release_cl_override) 374 if (release_cl_override)
375 chv_phy_powergate_ch(dev_priv, phy, ch, false); 375 chv_phy_powergate_ch(dev_priv, phy, ch, false);
376 } 376 }
377} 377}
378 378
379static enum i915_pipe 379static enum i915_pipe
380vlv_power_sequencer_pipe(struct intel_dp *intel_dp) 380vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
381{ 381{
382 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 382 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
383 struct drm_device *dev = intel_dig_port->base.base.dev; 383 struct drm_device *dev = intel_dig_port->base.base.dev;
384 struct drm_i915_private *dev_priv = dev->dev_private; 384 struct drm_i915_private *dev_priv = dev->dev_private;
385 struct intel_encoder *encoder; 385 struct intel_encoder *encoder;
386 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B); 386 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
387 enum i915_pipe pipe; 387 enum i915_pipe pipe;
388 388
389 lockdep_assert_held(&dev_priv->pps_mutex); 389 lockdep_assert_held(&dev_priv->pps_mutex);
390 390
391 /* We should never land here with regular DP ports */ 391 /* We should never land here with regular DP ports */
392 WARN_ON(!is_edp(intel_dp)); 392 WARN_ON(!is_edp(intel_dp));
393 393
394 if (intel_dp->pps_pipe != INVALID_PIPE) 394 if (intel_dp->pps_pipe != INVALID_PIPE)
395 return intel_dp->pps_pipe; 395 return intel_dp->pps_pipe;
396 396
397 /* 397 /*
 398 * We don't have a power sequencer currently. 398 * We don't have a power sequencer currently.
399 * Pick one that's not used by other ports. 399 * Pick one that's not used by other ports.
400 */ 400 */
401 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 401 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
402 base.head) { 402 base.head) {
403 struct intel_dp *tmp; 403 struct intel_dp *tmp;
404 404
405 if (encoder->type != INTEL_OUTPUT_EDP) 405 if (encoder->type != INTEL_OUTPUT_EDP)
406 continue; 406 continue;
407 407
408 tmp = enc_to_intel_dp(&encoder->base); 408 tmp = enc_to_intel_dp(&encoder->base);
409 409
410 if (tmp->pps_pipe != INVALID_PIPE) 410 if (tmp->pps_pipe != INVALID_PIPE)
411 pipes &= ~(1 << tmp->pps_pipe); 411 pipes &= ~(1 << tmp->pps_pipe);
412 } 412 }
413 413
414 /* 414 /*
415 * Didn't find one. This should not happen since there 415 * Didn't find one. This should not happen since there
416 * are two power sequencers and up to two eDP ports. 416 * are two power sequencers and up to two eDP ports.
417 */ 417 */
418 if (WARN_ON(pipes == 0)) 418 if (WARN_ON(pipes == 0))
419 pipe = PIPE_A; 419 pipe = PIPE_A;
420 else 420 else
421 pipe = ffs(pipes) - 1; 421 pipe = ffs(pipes) - 1;
422 422
423 vlv_steal_power_sequencer(dev, pipe); 423 vlv_steal_power_sequencer(dev, pipe);
424 intel_dp->pps_pipe = pipe; 424 intel_dp->pps_pipe = pipe;
425 425
426 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n", 426 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
427 pipe_name(intel_dp->pps_pipe), 427 pipe_name(intel_dp->pps_pipe),
428 port_name(intel_dig_port->port)); 428 port_name(intel_dig_port->port));
429 429
430 /* init power sequencer on this pipe and port */ 430 /* init power sequencer on this pipe and port */
431 intel_dp_init_panel_power_sequencer(dev, intel_dp); 431 intel_dp_init_panel_power_sequencer(dev, intel_dp);
432 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp); 432 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
433 433
434 /* 434 /*
 435 * Even VDD force doesn't work until we've made 435 * Even VDD force doesn't work until we've made
 436 * the power sequencer lock onto the port. 436 * the power sequencer lock onto the port.
437 */ 437 */
438 vlv_power_sequencer_kick(intel_dp); 438 vlv_power_sequencer_kick(intel_dp);
439 439
440 return intel_dp->pps_pipe; 440 return intel_dp->pps_pipe;
441} 441}
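
The pipe selection above is plain bitmask bookkeeping; a minimal sketch with assumed state:

/*
 * Sketch with assumed state: pipes starts as
 *     (1 << PIPE_A) | (1 << PIPE_B) = 0x3.
 * If another eDP port already owns pipe A's power sequencer, the
 * loop clears that bit, leaving pipes == (1 << PIPE_B) = 0x2, and
 *     ffs(0x2) - 1 == PIPE_B
 * is picked.  Only if both bits were cleared (which the WARN_ON
 * treats as a bug) does the code fall back to PIPE_A.
 */
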
442 442
443typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv, 443typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
444 enum pipe pipe); 444 enum pipe pipe);
445 445
446static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv, 446static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
447 enum pipe pipe) 447 enum pipe pipe)
448{ 448{
449 return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON; 449 return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
450} 450}
451 451
452static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv, 452static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
453 enum pipe pipe) 453 enum pipe pipe)
454{ 454{
455 return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD; 455 return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
456} 456}
457 457
458static bool vlv_pipe_any(struct drm_i915_private *dev_priv, 458static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
459 enum pipe pipe) 459 enum pipe pipe)
460{ 460{
461 return true; 461 return true;
462} 462}
463 463
464static enum pipe 464static enum pipe
465vlv_initial_pps_pipe(struct drm_i915_private *dev_priv, 465vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
466 enum port port, 466 enum port port,
467 vlv_pipe_check pipe_check) 467 vlv_pipe_check pipe_check)
468{ 468{
469 enum pipe pipe; 469 enum pipe pipe;
470 470
471 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) { 471 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
472 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) & 472 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
473 PANEL_PORT_SELECT_MASK; 473 PANEL_PORT_SELECT_MASK;
474 474
475 if (port_sel != PANEL_PORT_SELECT_VLV(port)) 475 if (port_sel != PANEL_PORT_SELECT_VLV(port))
476 continue; 476 continue;
477 477
478 if (!pipe_check(dev_priv, pipe)) 478 if (!pipe_check(dev_priv, pipe))
479 continue; 479 continue;
480 480
481 return pipe; 481 return pipe;
482 } 482 }
483 483
484 return INVALID_PIPE; 484 return INVALID_PIPE;
485} 485}
486 486
487static void 487static void
488vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp) 488vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
489{ 489{
490 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 490 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
491 struct drm_device *dev = intel_dig_port->base.base.dev; 491 struct drm_device *dev = intel_dig_port->base.base.dev;
492 struct drm_i915_private *dev_priv = dev->dev_private; 492 struct drm_i915_private *dev_priv = dev->dev_private;
493 enum port port = intel_dig_port->port; 493 enum port port = intel_dig_port->port;
494 494
495 lockdep_assert_held(&dev_priv->pps_mutex); 495 lockdep_assert_held(&dev_priv->pps_mutex);
496 496
497 /* try to find a pipe with this port selected */ 497 /* try to find a pipe with this port selected */
498 /* first pick one where the panel is on */ 498 /* first pick one where the panel is on */
499 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port, 499 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
500 vlv_pipe_has_pp_on); 500 vlv_pipe_has_pp_on);
501 /* didn't find one? pick one where vdd is on */ 501 /* didn't find one? pick one where vdd is on */
502 if (intel_dp->pps_pipe == INVALID_PIPE) 502 if (intel_dp->pps_pipe == INVALID_PIPE)
503 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port, 503 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
504 vlv_pipe_has_vdd_on); 504 vlv_pipe_has_vdd_on);
505 /* didn't find one? pick one with just the correct port */ 505 /* didn't find one? pick one with just the correct port */
506 if (intel_dp->pps_pipe == INVALID_PIPE) 506 if (intel_dp->pps_pipe == INVALID_PIPE)
507 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port, 507 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
508 vlv_pipe_any); 508 vlv_pipe_any);
509 509
510 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */ 510 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
511 if (intel_dp->pps_pipe == INVALID_PIPE) { 511 if (intel_dp->pps_pipe == INVALID_PIPE) {
512 DRM_DEBUG_KMS("no initial power sequencer for port %c\n", 512 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
513 port_name(port)); 513 port_name(port));
514 return; 514 return;
515 } 515 }
516 516
517 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n", 517 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
518 port_name(port), pipe_name(intel_dp->pps_pipe)); 518 port_name(port), pipe_name(intel_dp->pps_pipe));
519 519
520 intel_dp_init_panel_power_sequencer(dev, intel_dp); 520 intel_dp_init_panel_power_sequencer(dev, intel_dp);
521 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp); 521 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
522} 522}
523 523
524void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv) 524void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
525{ 525{
526 struct drm_device *dev = dev_priv->dev; 526 struct drm_device *dev = dev_priv->dev;
527 struct intel_encoder *encoder; 527 struct intel_encoder *encoder;
528 528
529 if (WARN_ON(!IS_VALLEYVIEW(dev))) 529 if (WARN_ON(!IS_VALLEYVIEW(dev)))
530 return; 530 return;
531 531
532 /* 532 /*
533 * We can't grab pps_mutex here due to deadlock with power_domain 533 * We can't grab pps_mutex here due to deadlock with power_domain
534 * mutex when power_domain functions are called while holding pps_mutex. 534 * mutex when power_domain functions are called while holding pps_mutex.
535 * That also means that in order to use pps_pipe the code needs to 535 * That also means that in order to use pps_pipe the code needs to
536 * hold both a power domain reference and pps_mutex, and the power domain 536 * hold both a power domain reference and pps_mutex, and the power domain
537 * reference get/put must be done while _not_ holding pps_mutex. 537 * reference get/put must be done while _not_ holding pps_mutex.
538 * pps_{lock,unlock}() do these steps in the correct order, so one 538 * pps_{lock,unlock}() do these steps in the correct order, so one
539 * should use them always. 539 * should use them always.
540 */ 540 */
541 541
542 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { 542 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
543 struct intel_dp *intel_dp; 543 struct intel_dp *intel_dp;
544 544
545 if (encoder->type != INTEL_OUTPUT_EDP) 545 if (encoder->type != INTEL_OUTPUT_EDP)
546 continue; 546 continue;
547 547
548 intel_dp = enc_to_intel_dp(&encoder->base); 548 intel_dp = enc_to_intel_dp(&encoder->base);
549 intel_dp->pps_pipe = INVALID_PIPE; 549 intel_dp->pps_pipe = INVALID_PIPE;
550 } 550 }
551} 551}
552 552
553static u32 _pp_ctrl_reg(struct intel_dp *intel_dp) 553static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
554{ 554{
555 struct drm_device *dev = intel_dp_to_dev(intel_dp); 555 struct drm_device *dev = intel_dp_to_dev(intel_dp);
556 556
557 if (IS_BROXTON(dev)) 557 if (IS_BROXTON(dev))
558 return BXT_PP_CONTROL(0); 558 return BXT_PP_CONTROL(0);
559 else if (HAS_PCH_SPLIT(dev)) 559 else if (HAS_PCH_SPLIT(dev))
560 return PCH_PP_CONTROL; 560 return PCH_PP_CONTROL;
561 else 561 else
562 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp)); 562 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
563} 563}
564 564
565static u32 _pp_stat_reg(struct intel_dp *intel_dp) 565static u32 _pp_stat_reg(struct intel_dp *intel_dp)
566{ 566{
567 struct drm_device *dev = intel_dp_to_dev(intel_dp); 567 struct drm_device *dev = intel_dp_to_dev(intel_dp);
568 568
569 if (IS_BROXTON(dev)) 569 if (IS_BROXTON(dev))
570 return BXT_PP_STATUS(0); 570 return BXT_PP_STATUS(0);
571 else if (HAS_PCH_SPLIT(dev)) 571 else if (HAS_PCH_SPLIT(dev))
572 return PCH_PP_STATUS; 572 return PCH_PP_STATUS;
573 else 573 else
574 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp)); 574 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
575} 575}
576 576
577/* Reboot notifier handler to shut down panel power to guarantee T12 timing. 577/* Reboot notifier handler to shut down panel power to guarantee T12 timing.
578 This function is only applicable when panel PM state is not to be tracked. */ 578 This function is only applicable when panel PM state is not to be tracked. */
579static int edp_notify_handler(struct notifier_block *this, unsigned long code, 579static int edp_notify_handler(struct notifier_block *this, unsigned long code,
580 void *unused) 580 void *unused)
581{ 581{
 582#ifdef __NetBSD__
 583 panic("XXX");
 584#else
582 struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp), 585 struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
583 edp_notifier); 586 edp_notifier);
584 struct drm_device *dev = intel_dp_to_dev(intel_dp); 587 struct drm_device *dev = intel_dp_to_dev(intel_dp);
585 struct drm_i915_private *dev_priv = dev->dev_private; 588 struct drm_i915_private *dev_priv = dev->dev_private;
586 589
587 if (!is_edp(intel_dp) || code != SYS_RESTART) 590 if (!is_edp(intel_dp) || code != SYS_RESTART)
588 return 0; 591 return 0;
589 592
590 pps_lock(intel_dp); 593 pps_lock(intel_dp);
591 594
592 if (IS_VALLEYVIEW(dev)) { 595 if (IS_VALLEYVIEW(dev)) {
593 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp); 596 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
594 u32 pp_ctrl_reg, pp_div_reg; 597 u32 pp_ctrl_reg, pp_div_reg;
595 u32 pp_div; 598 u32 pp_div;
596 599
597 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe); 600 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
598 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe); 601 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
599 pp_div = I915_READ(pp_div_reg); 602 pp_div = I915_READ(pp_div_reg);
600 pp_div &= PP_REFERENCE_DIVIDER_MASK; 603 pp_div &= PP_REFERENCE_DIVIDER_MASK;
601 604
602 /* 0x1F write to PP_DIV_REG sets max cycle delay */ 605 /* 0x1F write to PP_DIV_REG sets max cycle delay */
603 I915_WRITE(pp_div_reg, pp_div | 0x1F); 606 I915_WRITE(pp_div_reg, pp_div | 0x1F);
604 I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF); 607 I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
605 msleep(intel_dp->panel_power_cycle_delay); 608 msleep(intel_dp->panel_power_cycle_delay);
606 } 609 }
607 610
608 pps_unlock(intel_dp); 611 pps_unlock(intel_dp);
609 612
610 return 0; 613 return 0;
 614#endif
611} 615}
612 616
613static bool edp_have_panel_power(struct intel_dp *intel_dp) 617static bool edp_have_panel_power(struct intel_dp *intel_dp)
614{ 618{
615 struct drm_device *dev = intel_dp_to_dev(intel_dp); 619 struct drm_device *dev = intel_dp_to_dev(intel_dp);
616 struct drm_i915_private *dev_priv = dev->dev_private; 620 struct drm_i915_private *dev_priv = dev->dev_private;
617 621
618 lockdep_assert_held(&dev_priv->pps_mutex); 622 lockdep_assert_held(&dev_priv->pps_mutex);
619 623
620 if (IS_VALLEYVIEW(dev) && 624 if (IS_VALLEYVIEW(dev) &&
621 intel_dp->pps_pipe == INVALID_PIPE) 625 intel_dp->pps_pipe == INVALID_PIPE)
622 return false; 626 return false;
623 627
624 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0; 628 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
625} 629}
626 630
627static bool edp_have_panel_vdd(struct intel_dp *intel_dp) 631static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
628{ 632{
629 struct drm_device *dev = intel_dp_to_dev(intel_dp); 633 struct drm_device *dev = intel_dp_to_dev(intel_dp);
630 struct drm_i915_private *dev_priv = dev->dev_private; 634 struct drm_i915_private *dev_priv = dev->dev_private;
631 635
632 lockdep_assert_held(&dev_priv->pps_mutex); 636 lockdep_assert_held(&dev_priv->pps_mutex);
633 637
634 if (IS_VALLEYVIEW(dev) && 638 if (IS_VALLEYVIEW(dev) &&
635 intel_dp->pps_pipe == INVALID_PIPE) 639 intel_dp->pps_pipe == INVALID_PIPE)
636 return false; 640 return false;
637 641
638 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD; 642 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
639} 643}
640 644
641static void 645static void
642intel_dp_check_edp(struct intel_dp *intel_dp) 646intel_dp_check_edp(struct intel_dp *intel_dp)
643{ 647{
644 struct drm_device *dev = intel_dp_to_dev(intel_dp); 648 struct drm_device *dev = intel_dp_to_dev(intel_dp);
645 struct drm_i915_private *dev_priv = dev->dev_private; 649 struct drm_i915_private *dev_priv = dev->dev_private;
646 650
647 if (!is_edp(intel_dp)) 651 if (!is_edp(intel_dp))
648 return; 652 return;
649 653
650 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) { 654 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
651 WARN(1, "eDP powered off while attempting aux channel communication.\n"); 655 WARN(1, "eDP powered off while attempting aux channel communication.\n");
652 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n", 656 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
653 I915_READ(_pp_stat_reg(intel_dp)), 657 I915_READ(_pp_stat_reg(intel_dp)),
654 I915_READ(_pp_ctrl_reg(intel_dp))); 658 I915_READ(_pp_ctrl_reg(intel_dp)));
655 } 659 }
656} 660}
657 661
658static uint32_t 662static uint32_t
659intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq) 663intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
660{ 664{
661 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 665 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
662 struct drm_device *dev = intel_dig_port->base.base.dev; 666 struct drm_device *dev = intel_dig_port->base.base.dev;
663 struct drm_i915_private *dev_priv = dev->dev_private; 667 struct drm_i915_private *dev_priv = dev->dev_private;
664 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg; 668 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
665 uint32_t status; 669 uint32_t status;
666 bool done; 670 bool done;
667 671
668#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0) 672#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
669#ifdef __NetBSD__ 673#ifdef __NetBSD__
670 if (has_aux_irq && !cold) { 674 if (has_aux_irq && !cold) {
671 int ret; 675 int ret;
672 spin_lock(&dev_priv->gmbus_wait_lock); 676 spin_lock(&dev_priv->gmbus_wait_lock);
673 DRM_SPIN_TIMED_WAIT_NOINTR_UNTIL(ret, 677 DRM_SPIN_TIMED_WAIT_NOINTR_UNTIL(ret,
674 &dev_priv->gmbus_wait_queue, &dev_priv->gmbus_wait_lock, 678 &dev_priv->gmbus_wait_queue, &dev_priv->gmbus_wait_lock,
675 msecs_to_jiffies_timeout(10), 679 msecs_to_jiffies_timeout(10),
676 C); 680 C);
677 if (ret < 0) /* Failure: pretend same as done. */ 681 if (ret < 0) /* Failure: pretend same as done. */
678 done = true; 682 done = true;
679 else if (ret == 0) /* Timed out: not done. */ 683 else if (ret == 0) /* Timed out: not done. */
680 done = false; 684 done = false;
681 else /* Succeeded (ret > 0): done. */ 685 else /* Succeeded (ret > 0): done. */
682 done = true; 686 done = true;
683 spin_unlock(&dev_priv->gmbus_wait_lock); 687 spin_unlock(&dev_priv->gmbus_wait_lock);
684 } else { 688 } else {
685 done = wait_for_atomic(C, 10) == 0; 689 done = wait_for_atomic(C, 10) == 0;
686 } 690 }
687#else 691#else
688 if (has_aux_irq) 692 if (has_aux_irq)
689 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C, 693 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
690 msecs_to_jiffies_timeout(10)); 694 msecs_to_jiffies_timeout(10));
691 else 695 else
692 done = wait_for_atomic(C, 10) == 0; 696 done = wait_for_atomic(C, 10) == 0;
693#endif 697#endif
694 if (!done) 698 if (!done)
695 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n", 699 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
696 has_aux_irq); 700 has_aux_irq);
697#undef C 701#undef C
698 702
699 return status; 703 return status;
700} 704}
701 705
702static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index) 706static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
703{ 707{
704 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 708 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
705 struct drm_device *dev = intel_dig_port->base.base.dev; 709 struct drm_device *dev = intel_dig_port->base.base.dev;
706 710
707 /* 711 /*
 708 * The clock divider is based on the hrawclk and should run at 712 * The clock divider is based on the hrawclk and should run at
 709 * 2 MHz. So take the hrawclk value, divide by 2, and use that. 713 * 2 MHz. So take the hrawclk value, divide by 2, and use that.
710 */ 714 */
711 return index ? 0 : intel_hrawclk(dev) / 2; 715 return index ? 0 : intel_hrawclk(dev) / 2;
712} 716}
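
To make the 2 MHz target concrete, a hedged example (assuming intel_hrawclk() reports MHz and cdclk_freq is kept in kHz, as elsewhere in the driver); the CDCLK-based helpers below follow the same idea.

/*
 * Assumed example values:
 *   i9xx:  hrawclk of 200 MHz  ->  divider = 200 / 2 = 100,
 *          so 200 MHz / 100 = 2 MHz AUX bit clock.
 *   ilk+:  cdclk_freq of 450000 kHz
 *          ->  divider = DIV_ROUND_UP(450000, 2000) = 225,
 *          so 450 MHz / 225 = 2 MHz AUX bit clock.
 */
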
713 717
714static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index) 718static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
715{ 719{
716 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 720 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
717 struct drm_device *dev = intel_dig_port->base.base.dev; 721 struct drm_device *dev = intel_dig_port->base.base.dev;
718 struct drm_i915_private *dev_priv = dev->dev_private; 722 struct drm_i915_private *dev_priv = dev->dev_private;
719 723
720 if (index) 724 if (index)
721 return 0; 725 return 0;
722 726
723 if (intel_dig_port->port == PORT_A) { 727 if (intel_dig_port->port == PORT_A) {
724 return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000); 728 return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);
725 729
726 } else { 730 } else {
727 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2); 731 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
728 } 732 }
729} 733}
730 734
731static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index) 735static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
732{ 736{
733 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 737 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
734 struct drm_device *dev = intel_dig_port->base.base.dev; 738 struct drm_device *dev = intel_dig_port->base.base.dev;
735 struct drm_i915_private *dev_priv = dev->dev_private; 739 struct drm_i915_private *dev_priv = dev->dev_private;
736 740
737 if (intel_dig_port->port == PORT_A) { 741 if (intel_dig_port->port == PORT_A) {
738 if (index) 742 if (index)
739 return 0; 743 return 0;
740 return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000); 744 return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
741 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { 745 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
742 /* Workaround for non-ULT HSW */ 746 /* Workaround for non-ULT HSW */
743 switch (index) { 747 switch (index) {
744 case 0: return 63; 748 case 0: return 63;
745 case 1: return 72; 749 case 1: return 72;
746 default: return 0; 750 default: return 0;
747 } 751 }
748 } else { 752 } else {
749 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2); 753 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
750 } 754 }
751} 755}
752 756
753static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index) 757static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
754{ 758{
755 return index ? 0 : 100; 759 return index ? 0 : 100;
756} 760}
757 761
758static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index) 762static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
759{ 763{
760 /* 764 /*
761 * SKL doesn't need us to program the AUX clock divider (Hardware will 765 * SKL doesn't need us to program the AUX clock divider (Hardware will
762 * derive the clock from CDCLK automatically). We still implement the 766 * derive the clock from CDCLK automatically). We still implement the
 763 * get_aux_clock_divider vfunc to plug into the existing code. 767 * get_aux_clock_divider vfunc to plug into the existing code.
764 */ 768 */
765 return index ? 0 : 1; 769 return index ? 0 : 1;
766} 770}
767 771
768static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp, 772static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
769 bool has_aux_irq, 773 bool has_aux_irq,
770 int send_bytes, 774 int send_bytes,
771 uint32_t aux_clock_divider) 775 uint32_t aux_clock_divider)
772{ 776{
773 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 777 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
774 struct drm_device *dev = intel_dig_port->base.base.dev; 778 struct drm_device *dev = intel_dig_port->base.base.dev;
775 uint32_t precharge, timeout; 779 uint32_t precharge, timeout;
776 780
777 if (IS_GEN6(dev)) 781 if (IS_GEN6(dev))
778 precharge = 3; 782 precharge = 3;
779 else 783 else
780 precharge = 5; 784 precharge = 5;
781 785
782 if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL) 786 if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
783 timeout = DP_AUX_CH_CTL_TIME_OUT_600us; 787 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
784 else 788 else
785 timeout = DP_AUX_CH_CTL_TIME_OUT_400us; 789 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
786 790
787 return DP_AUX_CH_CTL_SEND_BUSY | 791 return DP_AUX_CH_CTL_SEND_BUSY |
788 DP_AUX_CH_CTL_DONE | 792 DP_AUX_CH_CTL_DONE |
789 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) | 793 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
790 DP_AUX_CH_CTL_TIME_OUT_ERROR | 794 DP_AUX_CH_CTL_TIME_OUT_ERROR |
791 timeout | 795 timeout |
792 DP_AUX_CH_CTL_RECEIVE_ERROR | 796 DP_AUX_CH_CTL_RECEIVE_ERROR |
793 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | 797 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
794 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | 798 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
795 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT); 799 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
796} 800}
797 801
798static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp, 802static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
799 bool has_aux_irq, 803 bool has_aux_irq,
800 int send_bytes, 804 int send_bytes,
801 uint32_t unused) 805 uint32_t unused)
802{ 806{
803 return DP_AUX_CH_CTL_SEND_BUSY | 807 return DP_AUX_CH_CTL_SEND_BUSY |
804 DP_AUX_CH_CTL_DONE | 808 DP_AUX_CH_CTL_DONE |
805 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) | 809 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
806 DP_AUX_CH_CTL_TIME_OUT_ERROR | 810 DP_AUX_CH_CTL_TIME_OUT_ERROR |
807 DP_AUX_CH_CTL_TIME_OUT_1600us | 811 DP_AUX_CH_CTL_TIME_OUT_1600us |
808 DP_AUX_CH_CTL_RECEIVE_ERROR | 812 DP_AUX_CH_CTL_RECEIVE_ERROR |
809 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | 813 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
810 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32); 814 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
811} 815}
812 816
813static int 817static int
814intel_dp_aux_ch(struct intel_dp *intel_dp, 818intel_dp_aux_ch(struct intel_dp *intel_dp,
815 const uint8_t *send, int send_bytes, 819 const uint8_t *send, int send_bytes,
816 uint8_t *recv, int recv_size) 820 uint8_t *recv, int recv_size)
817{ 821{
818 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 822 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
819 struct drm_device *dev = intel_dig_port->base.base.dev; 823 struct drm_device *dev = intel_dig_port->base.base.dev;
820 struct drm_i915_private *dev_priv = dev->dev_private; 824 struct drm_i915_private *dev_priv = dev->dev_private;
821 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg; 825 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
822 uint32_t ch_data = ch_ctl + 4; 826 uint32_t ch_data = ch_ctl + 4;
823 uint32_t aux_clock_divider; 827 uint32_t aux_clock_divider;
824 int i, ret, recv_bytes; 828 int i, ret, recv_bytes;
825 uint32_t status; 829 uint32_t status;
826 int try, clock = 0; 830 int try, clock = 0;
827 bool has_aux_irq = HAS_AUX_IRQ(dev); 831 bool has_aux_irq = HAS_AUX_IRQ(dev);
828 bool vdd; 832 bool vdd;
829 833
830 pps_lock(intel_dp); 834 pps_lock(intel_dp);
831 835
832 /* 836 /*
833 * We will be called with VDD already enabled for dpcd/edid/oui reads. 837 * We will be called with VDD already enabled for dpcd/edid/oui reads.
834 * In such cases we want to leave VDD enabled and it's up to upper layers 838 * In such cases we want to leave VDD enabled and it's up to upper layers
 835 * to turn it off. But e.g. for i2c-dev access we need to turn it on/off 839 * to turn it off. But e.g. for i2c-dev access we need to turn it on/off
836 * ourselves. 840 * ourselves.
837 */ 841 */
838 vdd = edp_panel_vdd_on(intel_dp); 842 vdd = edp_panel_vdd_on(intel_dp);
839 843
840 /* dp aux is extremely sensitive to irq latency, hence request the 844 /* dp aux is extremely sensitive to irq latency, hence request the
841 * lowest possible wakeup latency and so prevent the cpu from going into 845 * lowest possible wakeup latency and so prevent the cpu from going into
842 * deep sleep states. 846 * deep sleep states.
843 */ 847 */
844 pm_qos_update_request(&dev_priv->pm_qos, 0); 848 pm_qos_update_request(&dev_priv->pm_qos, 0);
845 849
846 intel_dp_check_edp(intel_dp); 850 intel_dp_check_edp(intel_dp);
847 851
848 /* Try to wait for any previous AUX channel activity */ 852 /* Try to wait for any previous AUX channel activity */
849 for (try = 0; try < 3; try++) { 853 for (try = 0; try < 3; try++) {
850 status = I915_READ_NOTRACE(ch_ctl); 854 status = I915_READ_NOTRACE(ch_ctl);
851 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0) 855 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
852 break; 856 break;
853 msleep(1); 857 msleep(1);
854 } 858 }
855 859
856 if (try == 3) { 860 if (try == 3) {
857 static u32 last_status = -1; 861 static u32 last_status = -1;
858 const u32 status = I915_READ(ch_ctl); 862 const u32 status = I915_READ(ch_ctl);
859 863
860 if (status != last_status) { 864 if (status != last_status) {
861 WARN(1, "dp_aux_ch not started status 0x%08x\n", 865 WARN(1, "dp_aux_ch not started status 0x%08x\n",
862 status); 866 status);
863 last_status = status; 867 last_status = status;
864 } 868 }
865 869
866 ret = -EBUSY; 870 ret = -EBUSY;
867 goto out; 871 goto out;
868 } 872 }
869 873
870 /* Only 5 data registers! */ 874 /* Only 5 data registers! */
871 if (WARN_ON(send_bytes > 20 || recv_size > 20)) { 875 if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
872 ret = -E2BIG; 876 ret = -E2BIG;
873 goto out; 877 goto out;
874 } 878 }
875 879
876 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) { 880 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
877 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp, 881 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
878 has_aux_irq, 882 has_aux_irq,
879 send_bytes, 883 send_bytes,
880 aux_clock_divider); 884 aux_clock_divider);
881 885
882 /* Must try at least 3 times according to DP spec */ 886 /* Must try at least 3 times according to DP spec */
883 for (try = 0; try < 5; try++) { 887 for (try = 0; try < 5; try++) {
884 /* Load the send data into the aux channel data registers */ 888 /* Load the send data into the aux channel data registers */
885 for (i = 0; i < send_bytes; i += 4) 889 for (i = 0; i < send_bytes; i += 4)
886 I915_WRITE(ch_data + i, 890 I915_WRITE(ch_data + i,
887 intel_dp_pack_aux(send + i, 891 intel_dp_pack_aux(send + i,
888 send_bytes - i)); 892 send_bytes - i));
889 893
890 /* Send the command and wait for it to complete */ 894 /* Send the command and wait for it to complete */
891 I915_WRITE(ch_ctl, send_ctl); 895 I915_WRITE(ch_ctl, send_ctl);
892 896
893 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq); 897 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
894 898
895 /* Clear done status and any errors */ 899 /* Clear done status and any errors */
896 I915_WRITE(ch_ctl, 900 I915_WRITE(ch_ctl,
897 status | 901 status |
898 DP_AUX_CH_CTL_DONE | 902 DP_AUX_CH_CTL_DONE |
899 DP_AUX_CH_CTL_TIME_OUT_ERROR | 903 DP_AUX_CH_CTL_TIME_OUT_ERROR |
900 DP_AUX_CH_CTL_RECEIVE_ERROR); 904 DP_AUX_CH_CTL_RECEIVE_ERROR);
901 905
902 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) 906 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
903 continue; 907 continue;
904 908
905 /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2 909 /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
906 * 400us delay required for errors and timeouts 910 * 400us delay required for errors and timeouts
907 * Timeout errors from the HW already meet this 911 * Timeout errors from the HW already meet this
908 * requirement so skip to next iteration 912 * requirement so skip to next iteration
909 */ 913 */
910 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) { 914 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
911 usleep_range(400, 500); 915 usleep_range(400, 500);
912 continue; 916 continue;
913 } 917 }
914 if (status & DP_AUX_CH_CTL_DONE) 918 if (status & DP_AUX_CH_CTL_DONE)
915 goto done; 919 goto done;
916 } 920 }
917 } 921 }
918 922
919 if ((status & DP_AUX_CH_CTL_DONE) == 0) { 923 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
920 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status); 924 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
921 ret = -EBUSY; 925 ret = -EBUSY;
922 goto out; 926 goto out;
923 } 927 }
924 928
925done: 929done:
926 /* Check for timeout or receive error. 930 /* Check for timeout or receive error.
927 * Timeouts occur when the sink is not connected 931 * Timeouts occur when the sink is not connected
928 */ 932 */
929 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) { 933 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
930 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status); 934 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
931 ret = -EIO; 935 ret = -EIO;
932 goto out; 936 goto out;
933 } 937 }
934 938
935 /* Timeouts occur when the device isn't connected, so they're 939 /* Timeouts occur when the device isn't connected, so they're
936 * "normal" -- don't fill the kernel log with these */ 940 * "normal" -- don't fill the kernel log with these */
937 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) { 941 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
938 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status); 942 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
939 ret = -ETIMEDOUT; 943 ret = -ETIMEDOUT;
940 goto out; 944 goto out;
941 } 945 }
942 946
943 /* Unload any bytes sent back from the other side */ 947 /* Unload any bytes sent back from the other side */
944 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >> 948 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
945 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT); 949 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
946 if (recv_bytes > recv_size) 950 if (recv_bytes > recv_size)
947 recv_bytes = recv_size; 951 recv_bytes = recv_size;
948 952
949 for (i = 0; i < recv_bytes; i += 4) 953 for (i = 0; i < recv_bytes; i += 4)
950 intel_dp_unpack_aux(I915_READ(ch_data + i), 954 intel_dp_unpack_aux(I915_READ(ch_data + i),
951 recv + i, recv_bytes - i); 955 recv + i, recv_bytes - i);
952 956
953 ret = recv_bytes; 957 ret = recv_bytes;
954out: 958out:
955 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE); 959 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
956 960
957 if (vdd) 961 if (vdd)
958 edp_panel_vdd_off(intel_dp, false); 962 edp_panel_vdd_off(intel_dp, false);
959 963
960 pps_unlock(intel_dp); 964 pps_unlock(intel_dp);
961 965
962 return ret; 966 return ret;
963} 967}
964 968
965#define BARE_ADDRESS_SIZE 3 969#define BARE_ADDRESS_SIZE 3
966#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1) 970#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
967static ssize_t 971static ssize_t
968intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) 972intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
969{ 973{
970 struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux); 974 struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
971 uint8_t txbuf[20], rxbuf[20]; 975 uint8_t txbuf[20], rxbuf[20];
972 size_t txsize, rxsize; 976 size_t txsize, rxsize;
973 int ret; 977 int ret;
974 978
975 txbuf[0] = (msg->request << 4) | 979 txbuf[0] = (msg->request << 4) |
976 ((msg->address >> 16) & 0xf); 980 ((msg->address >> 16) & 0xf);
977 txbuf[1] = (msg->address >> 8) & 0xff; 981 txbuf[1] = (msg->address >> 8) & 0xff;
978 txbuf[2] = msg->address & 0xff; 982 txbuf[2] = msg->address & 0xff;
979 txbuf[3] = msg->size - 1; 983 txbuf[3] = msg->size - 1;
980 984
981 switch (msg->request & ~DP_AUX_I2C_MOT) { 985 switch (msg->request & ~DP_AUX_I2C_MOT) {
982 case DP_AUX_NATIVE_WRITE: 986 case DP_AUX_NATIVE_WRITE:
983 case DP_AUX_I2C_WRITE: 987 case DP_AUX_I2C_WRITE:
984 case DP_AUX_I2C_WRITE_STATUS_UPDATE: 988 case DP_AUX_I2C_WRITE_STATUS_UPDATE:
985 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE; 989 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
986 rxsize = 2; /* 0 or 1 data bytes */ 990 rxsize = 2; /* 0 or 1 data bytes */
987 991
988 if (WARN_ON(txsize > 20)) 992 if (WARN_ON(txsize > 20))
989 return -E2BIG; 993 return -E2BIG;
990 994
991 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size); 995 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
992 996
993 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize); 997 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
994 if (ret > 0) { 998 if (ret > 0) {
995 msg->reply = rxbuf[0] >> 4; 999 msg->reply = rxbuf[0] >> 4;
996 1000
997 if (ret > 1) { 1001 if (ret > 1) {
998 /* Number of bytes written in a short write. */ 1002 /* Number of bytes written in a short write. */
999 ret = clamp_t(int, rxbuf[1], 0, msg->size); 1003 ret = clamp_t(int, rxbuf[1], 0, msg->size);
1000 } else { 1004 } else {
1001 /* Return payload size. */ 1005 /* Return payload size. */
1002 ret = msg->size; 1006 ret = msg->size;
1003 } 1007 }
1004 } 1008 }
1005 break; 1009 break;
1006 1010
1007 case DP_AUX_NATIVE_READ: 1011 case DP_AUX_NATIVE_READ:
1008 case DP_AUX_I2C_READ: 1012 case DP_AUX_I2C_READ:
1009 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE; 1013 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
1010 rxsize = msg->size + 1; 1014 rxsize = msg->size + 1;
1011 1015
1012 if (WARN_ON(rxsize > 20)) 1016 if (WARN_ON(rxsize > 20))
1013 return -E2BIG; 1017 return -E2BIG;
1014 1018
1015 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize); 1019 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
1016 if (ret > 0) { 1020 if (ret > 0) {
1017 msg->reply = rxbuf[0] >> 4; 1021 msg->reply = rxbuf[0] >> 4;
1018 /* 1022 /*
1019 * Assume happy day, and copy the data. The caller is 1023 * Assume happy day, and copy the data. The caller is
1020 * expected to check msg->reply before touching it. 1024 * expected to check msg->reply before touching it.
1021 * 1025 *
1022 * Return payload size. 1026 * Return payload size.
1023 */ 1027 */
1024 ret--; 1028 ret--;
1025 memcpy(msg->buffer, rxbuf + 1, ret); 1029 memcpy(msg->buffer, rxbuf + 1, ret);
1026 } 1030 }
1027 break; 1031 break;
1028 1032
1029 default: 1033 default:
1030 ret = -EINVAL; 1034 ret = -EINVAL;
1031 break; 1035 break;
1032 } 1036 }
1033 1037
1034 return ret; 1038 return ret;
1035} 1039}
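
For reference, a hedged sketch of the four-byte AUX header that intel_dp_aux_transfer() builds above (example values only; the request codes are the ones defined in drm_dp_helper.h):

/*
 * Assumed example: a native AUX read of 16 bytes starting at DPCD
 * address 0x00000, i.e. msg->request == DP_AUX_NATIVE_READ (0x9 in
 * drm_dp_helper.h), msg->address == 0x00000, msg->size == 16:
 *
 *   txbuf[0] = (0x9 << 4) | ((0x00000 >> 16) & 0xf) = 0x90
 *   txbuf[1] = (0x00000 >> 8) & 0xff                = 0x00
 *   txbuf[2] =  0x00000       & 0xff                = 0x00
 *   txbuf[3] =  16 - 1                              = 0x0f
 *
 * txsize is HEADER_SIZE (4) since a read carries no payload, and
 * rxsize is msg->size + 1 to make room for the reply byte that
 * precedes the returned data.
 */
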
1036 1040
1037static void 1041static void
1038intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector) 1042intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1039{ 1043{
1040 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1044 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1041 struct drm_i915_private *dev_priv = dev->dev_private; 1045 struct drm_i915_private *dev_priv = dev->dev_private;
1042 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 1046 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1043 enum port port = intel_dig_port->port; 1047 enum port port = intel_dig_port->port;
1044 struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port]; 1048 struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
1045 const char *name = NULL; 1049 const char *name = NULL;
1046 uint32_t porte_aux_ctl_reg = DPA_AUX_CH_CTL; 1050 uint32_t porte_aux_ctl_reg = DPA_AUX_CH_CTL;
1047 int ret; 1051 int ret;
1048 1052
1049 /* On SKL we don't have Aux for port E so we rely on VBT to set 1053 /* On SKL we don't have Aux for port E so we rely on VBT to set
1050 * a proper alternate aux channel. 1054 * a proper alternate aux channel.
1051 */ 1055 */
1052 if (IS_SKYLAKE(dev) && port == PORT_E) { 1056 if (IS_SKYLAKE(dev) && port == PORT_E) {
1053 switch (info->alternate_aux_channel) { 1057 switch (info->alternate_aux_channel) {
1054 case DP_AUX_B: 1058 case DP_AUX_B:
1055 porte_aux_ctl_reg = DPB_AUX_CH_CTL; 1059 porte_aux_ctl_reg = DPB_AUX_CH_CTL;
1056 break; 1060 break;
1057 case DP_AUX_C: 1061 case DP_AUX_C:
1058 porte_aux_ctl_reg = DPC_AUX_CH_CTL; 1062 porte_aux_ctl_reg = DPC_AUX_CH_CTL;
1059 break; 1063 break;
1060 case DP_AUX_D: 1064 case DP_AUX_D:
1061 porte_aux_ctl_reg = DPD_AUX_CH_CTL; 1065 porte_aux_ctl_reg = DPD_AUX_CH_CTL;
1062 break; 1066 break;
1063 case DP_AUX_A: 1067 case DP_AUX_A:
1064 default: 1068 default:
1065 porte_aux_ctl_reg = DPA_AUX_CH_CTL; 1069 porte_aux_ctl_reg = DPA_AUX_CH_CTL;
1066 } 1070 }
1067 } 1071 }
1068 1072
1069 switch (port) { 1073 switch (port) {
1070 case PORT_A: 1074 case PORT_A:
1071 intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL; 1075 intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
1072 name = "DPDDC-A"; 1076 name = "DPDDC-A";
1073 break; 1077 break;
1074 case PORT_B: 1078 case PORT_B:
1075 intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL; 1079 intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
1076 name = "DPDDC-B"; 1080 name = "DPDDC-B";
1077 break; 1081 break;
1078 case PORT_C: 1082 case PORT_C:
1079 intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL; 1083 intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
1080 name = "DPDDC-C"; 1084 name = "DPDDC-C";
1081 break; 1085 break;
1082 case PORT_D: 1086 case PORT_D:
1083 intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL; 1087 intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
1084 name = "DPDDC-D"; 1088 name = "DPDDC-D";
1085 break; 1089 break;
1086 case PORT_E: 1090 case PORT_E:
1087 intel_dp->aux_ch_ctl_reg = porte_aux_ctl_reg; 1091 intel_dp->aux_ch_ctl_reg = porte_aux_ctl_reg;
1088 name = "DPDDC-E"; 1092 name = "DPDDC-E";
1089 break; 1093 break;
1090 default: 1094 default:
1091 BUG(); 1095 BUG();
1092 } 1096 }
1093 1097
1094 /* 1098 /*
1095 * The AUX_CTL register is usually DP_CTL + 0x10. 1099 * The AUX_CTL register is usually DP_CTL + 0x10.
1096 * 1100 *
1097 * On Haswell and Broadwell though: 1101 * On Haswell and Broadwell though:
1098 * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU 1102 * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
1099 * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU 1103 * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
1100 * 1104 *
1101 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU. 1105 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
1102 */ 1106 */
1103 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev) && port != PORT_E) 1107 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev) && port != PORT_E)
1104 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10; 1108 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
1105 1109
1106 intel_dp->aux.name = name; 1110 intel_dp->aux.name = name;
1107 intel_dp->aux.dev = dev->dev; 1111 intel_dp->aux.dev = dev->dev;
1108 intel_dp->aux.transfer = intel_dp_aux_transfer; 1112 intel_dp->aux.transfer = intel_dp_aux_transfer;
1109 1113
1110#ifdef __NetBSD__ 1114#ifdef __NetBSD__
1111 DRM_DEBUG_KMS("registering %s bus for %s\n", name, 1115 DRM_DEBUG_KMS("registering %s bus for %s\n", name,
1112 device_xname(connector->base.dev->dev)); 1116 device_xname(connector->base.dev->dev));
1113#else 1117#else
1114 DRM_DEBUG_KMS("registering %s bus for %s\n", name, 1118 DRM_DEBUG_KMS("registering %s bus for %s\n", name,
1115 connector->base.kdev->kobj.name); 1119 connector->base.kdev->kobj.name);
1116#endif 1120#endif
1117 1121
1118 ret = drm_dp_aux_register(&intel_dp->aux); 1122 ret = drm_dp_aux_register(&intel_dp->aux);
1119 if (ret < 0) { 1123 if (ret < 0) {
1120 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n", 1124 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
1121 name, ret); 1125 name, ret);
1122 return; 1126 return;
1123 } 1127 }
1124 1128
1125#ifndef __NetBSD__ 1129#ifndef __NetBSD__
1126 ret = sysfs_create_link(&connector->base.kdev->kobj, 1130 ret = sysfs_create_link(&connector->base.kdev->kobj,
1127 &intel_dp->aux.ddc.dev.kobj, 1131 &intel_dp->aux.ddc.dev.kobj,
1128 intel_dp->aux.ddc.dev.kobj.name); 1132 intel_dp->aux.ddc.dev.kobj.name);
1129 if (ret < 0) { 1133 if (ret < 0) {
1130 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret); 1134 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
1131 drm_dp_aux_unregister(&intel_dp->aux); 1135 drm_dp_aux_unregister(&intel_dp->aux);
1132 } 1136 }
1133#endif 1137#endif
1134} 1138}
1135 1139
1136static void 1140static void
1137intel_dp_connector_unregister(struct intel_connector *intel_connector) 1141intel_dp_connector_unregister(struct intel_connector *intel_connector)
1138{ 1142{
1139#ifndef __NetBSD__ 1143#ifndef __NetBSD__
1140 struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base); 1144 struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1141#endif 1145#endif
1142 1146
1143#ifndef __NetBSD__ 1147#ifndef __NetBSD__
1144 if (!intel_connector->mst_port) 1148 if (!intel_connector->mst_port)
1145 sysfs_remove_link(&intel_connector->base.kdev->kobj, 1149 sysfs_remove_link(&intel_connector->base.kdev->kobj,
1146 intel_dp->aux.ddc.dev.kobj.name); 1150 intel_dp->aux.ddc.dev.kobj.name);
1147#endif 1151#endif
1148 intel_connector_unregister(intel_connector); 1152 intel_connector_unregister(intel_connector);
1149} 1153}
1150 1154
1151static void 1155static void
1152skl_edp_set_pll_config(struct intel_crtc_state *pipe_config) 1156skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
1153{ 1157{
1154 u32 ctrl1; 1158 u32 ctrl1;
1155 1159
1156 memset(&pipe_config->dpll_hw_state, 0, 1160 memset(&pipe_config->dpll_hw_state, 0,
1157 sizeof(pipe_config->dpll_hw_state)); 1161 sizeof(pipe_config->dpll_hw_state));
1158 1162
1159 pipe_config->ddi_pll_sel = SKL_DPLL0; 1163 pipe_config->ddi_pll_sel = SKL_DPLL0;
1160 pipe_config->dpll_hw_state.cfgcr1 = 0; 1164 pipe_config->dpll_hw_state.cfgcr1 = 0;
1161 pipe_config->dpll_hw_state.cfgcr2 = 0; 1165 pipe_config->dpll_hw_state.cfgcr2 = 0;
1162 1166
1163 ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0); 1167 ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
1164 switch (pipe_config->port_clock / 2) { 1168 switch (pipe_config->port_clock / 2) {
1165 case 81000: 1169 case 81000:
1166 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 1170 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
1167 SKL_DPLL0); 1171 SKL_DPLL0);
1168 break; 1172 break;
1169 case 135000: 1173 case 135000:
1170 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 1174 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
1171 SKL_DPLL0); 1175 SKL_DPLL0);
1172 break; 1176 break;
1173 case 270000: 1177 case 270000:
1174 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 1178 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
1175 SKL_DPLL0); 1179 SKL_DPLL0);
1176 break; 1180 break;
1177 case 162000: 1181 case 162000:
1178 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 1182 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
1179 SKL_DPLL0); 1183 SKL_DPLL0);
1180 break; 1184 break;
 1181 /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640, which 1185 /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640, which
 1182 results in a CDCLK change. Need to handle the change of CDCLK by 1186 results in a CDCLK change. Need to handle the change of CDCLK by
1183 disabling pipes and re-enabling them */ 1187 disabling pipes and re-enabling them */
1184 case 108000: 1188 case 108000:
1185 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 1189 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
1186 SKL_DPLL0); 1190 SKL_DPLL0);
1187 break; 1191 break;
1188 case 216000: 1192 case 216000:
1189 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 1193 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
1190 SKL_DPLL0); 1194 SKL_DPLL0);
1191 break; 1195 break;
1192 1196
1193 } 1197 }
1194 pipe_config->dpll_hw_state.ctrl1 = ctrl1; 1198 pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1195} 1199}
1196 1200
1197void 1201void
1198hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config) 1202hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
1199{ 1203{
1200 memset(&pipe_config->dpll_hw_state, 0, 1204 memset(&pipe_config->dpll_hw_state, 0,
1201 sizeof(pipe_config->dpll_hw_state)); 1205 sizeof(pipe_config->dpll_hw_state));
1202 1206
1203 switch (pipe_config->port_clock / 2) { 1207 switch (pipe_config->port_clock / 2) {
1204 case 81000: 1208 case 81000:
1205 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810; 1209 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1206 break; 1210 break;
1207 case 135000: 1211 case 135000:
1208 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350; 1212 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1209 break; 1213 break;
1210 case 270000: 1214 case 270000:
1211 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700; 1215 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1212 break; 1216 break;
1213 } 1217 }
1214} 1218}
1215 1219
1216static int 1220static int
1217intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates) 1221intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1218{ 1222{
1219 if (intel_dp->num_sink_rates) { 1223 if (intel_dp->num_sink_rates) {
1220 *sink_rates = intel_dp->sink_rates; 1224 *sink_rates = intel_dp->sink_rates;
1221 return intel_dp->num_sink_rates; 1225 return intel_dp->num_sink_rates;
1222 } 1226 }
1223 1227
1224 *sink_rates = default_rates; 1228 *sink_rates = default_rates;
1225 1229
1226 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1; 1230 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1227} 1231}
1228 1232
1229static bool intel_dp_source_supports_hbr2(struct drm_device *dev) 1233static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
1230{ 1234{
1231 /* WaDisableHBR2:skl */ 1235 /* WaDisableHBR2:skl */
1232 if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) 1236 if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1233 return false; 1237 return false;
1234 1238
1235 if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) || 1239 if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1236 (INTEL_INFO(dev)->gen >= 9)) 1240 (INTEL_INFO(dev)->gen >= 9))
1237 return true; 1241 return true;
1238 else 1242 else
1239 return false; 1243 return false;
1240} 1244}
1241 1245
1242static int 1246static int
1243intel_dp_source_rates(struct drm_device *dev, const int **source_rates) 1247intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
1244{ 1248{
1245 int size; 1249 int size;
1246 1250
1247 if (IS_BROXTON(dev)) { 1251 if (IS_BROXTON(dev)) {
1248 *source_rates = bxt_rates; 1252 *source_rates = bxt_rates;
1249 size = ARRAY_SIZE(bxt_rates); 1253 size = ARRAY_SIZE(bxt_rates);
1250 } else if (IS_SKYLAKE(dev)) { 1254 } else if (IS_SKYLAKE(dev)) {
1251 *source_rates = skl_rates; 1255 *source_rates = skl_rates;
1252 size = ARRAY_SIZE(skl_rates); 1256 size = ARRAY_SIZE(skl_rates);
1253 } else { 1257 } else {
1254 *source_rates = default_rates; 1258 *source_rates = default_rates;
1255 size = ARRAY_SIZE(default_rates); 1259 size = ARRAY_SIZE(default_rates);
1256 } 1260 }
1257 1261
1258 /* This depends on the fact that 5.4 is the last value in the array */ 1262 /* This depends on the fact that 5.4 is the last value in the array */
1259 if (!intel_dp_source_supports_hbr2(dev)) 1263 if (!intel_dp_source_supports_hbr2(dev))
1260 size--; 1264 size--;
1261 1265
1262 return size; 1266 return size;
1263} 1267}
1264 1268
1265static void 1269static void
1266intel_dp_set_clock(struct intel_encoder *encoder, 1270intel_dp_set_clock(struct intel_encoder *encoder,
1267 struct intel_crtc_state *pipe_config) 1271 struct intel_crtc_state *pipe_config)
1268{ 1272{
1269 struct drm_device *dev = encoder->base.dev; 1273 struct drm_device *dev = encoder->base.dev;
1270 const struct dp_link_dpll *divisor = NULL; 1274 const struct dp_link_dpll *divisor = NULL;
1271 int i, count = 0; 1275 int i, count = 0;
1272 1276
1273 if (IS_G4X(dev)) { 1277 if (IS_G4X(dev)) {
1274 divisor = gen4_dpll; 1278 divisor = gen4_dpll;
1275 count = ARRAY_SIZE(gen4_dpll); 1279 count = ARRAY_SIZE(gen4_dpll);
1276 } else if (HAS_PCH_SPLIT(dev)) { 1280 } else if (HAS_PCH_SPLIT(dev)) {
1277 divisor = pch_dpll; 1281 divisor = pch_dpll;
1278 count = ARRAY_SIZE(pch_dpll); 1282 count = ARRAY_SIZE(pch_dpll);
1279 } else if (IS_CHERRYVIEW(dev)) { 1283 } else if (IS_CHERRYVIEW(dev)) {
1280 divisor = chv_dpll; 1284 divisor = chv_dpll;
1281 count = ARRAY_SIZE(chv_dpll); 1285 count = ARRAY_SIZE(chv_dpll);
1282 } else if (IS_VALLEYVIEW(dev)) { 1286 } else if (IS_VALLEYVIEW(dev)) {
1283 divisor = vlv_dpll; 1287 divisor = vlv_dpll;
1284 count = ARRAY_SIZE(vlv_dpll); 1288 count = ARRAY_SIZE(vlv_dpll);
1285 } 1289 }
1286 1290
1287 if (divisor && count) { 1291 if (divisor && count) {
1288 for (i = 0; i < count; i++) { 1292 for (i = 0; i < count; i++) {
1289 if (pipe_config->port_clock == divisor[i].clock) { 1293 if (pipe_config->port_clock == divisor[i].clock) {
1290 pipe_config->dpll = divisor[i].dpll; 1294 pipe_config->dpll = divisor[i].dpll;
1291 pipe_config->clock_set = true; 1295 pipe_config->clock_set = true;
1292 break; 1296 break;
1293 } 1297 }
1294 } 1298 }
1295 } 1299 }
1296} 1300}
1297 1301
1298static int intersect_rates(const int *source_rates, int source_len, 1302static int intersect_rates(const int *source_rates, int source_len,
1299 const int *sink_rates, int sink_len, 1303 const int *sink_rates, int sink_len,
1300 int *common_rates) 1304 int *common_rates)
1301{ 1305{
1302 int i = 0, j = 0, k = 0; 1306 int i = 0, j = 0, k = 0;
1303 1307
1304 while (i < source_len && j < sink_len) { 1308 while (i < source_len && j < sink_len) {
1305 if (source_rates[i] == sink_rates[j]) { 1309 if (source_rates[i] == sink_rates[j]) {
1306 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES)) 1310 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1307 return k; 1311 return k;
1308 common_rates[k] = source_rates[i]; 1312 common_rates[k] = source_rates[i];
1309 ++k; 1313 ++k;
1310 ++i; 1314 ++i;
1311 ++j; 1315 ++j;
1312 } else if (source_rates[i] < sink_rates[j]) { 1316 } else if (source_rates[i] < sink_rates[j]) {
1313 ++i; 1317 ++i;
1314 } else { 1318 } else {
1315 ++j; 1319 ++j;
1316 } 1320 }
1317 } 1321 }
1318 return k; 1322 return k;
1319} 1323}
1320 1324
1321static int intel_dp_common_rates(struct intel_dp *intel_dp, 1325static int intel_dp_common_rates(struct intel_dp *intel_dp,
1322 int *common_rates) 1326 int *common_rates)
1323{ 1327{
1324 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1328 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1325 const int *source_rates, *sink_rates; 1329 const int *source_rates, *sink_rates;
1326 int source_len, sink_len; 1330 int source_len, sink_len;
1327 1331
1328 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates); 1332 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1329 source_len = intel_dp_source_rates(dev, &source_rates); 1333 source_len = intel_dp_source_rates(dev, &source_rates);
1330 1334
1331 return intersect_rates(source_rates, source_len, 1335 return intersect_rates(source_rates, source_len,
1332 sink_rates, sink_len, 1336 sink_rates, sink_len,
1333 common_rates); 1337 common_rates);
1334} 1338}
1335 1339
1336static void snprintf_int_array(char *str, size_t len, 1340static void snprintf_int_array(char *str, size_t len,
1337 const int *array, int nelem) 1341 const int *array, int nelem)
1338{ 1342{
1339 int i; 1343 int i;
1340 1344
1341 str[0] = '\0'; 1345 str[0] = '\0';
1342 1346
1343 for (i = 0; i < nelem; i++) { 1347 for (i = 0; i < nelem; i++) {
1344 int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]); 1348 int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
1345 if (r >= len) 1349 if (r >= len)
1346 return; 1350 return;
1347 str += r; 1351 str += r;
1348 len -= r; 1352 len -= r;
1349 } 1353 }
1350} 1354}
1351 1355
1352static void intel_dp_print_rates(struct intel_dp *intel_dp) 1356static void intel_dp_print_rates(struct intel_dp *intel_dp)
1353{ 1357{
1354 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1358 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1355 const int *source_rates, *sink_rates; 1359 const int *source_rates, *sink_rates;
1356 int source_len, sink_len, common_len; 1360 int source_len, sink_len, common_len;
1357 int common_rates[DP_MAX_SUPPORTED_RATES]; 1361 int common_rates[DP_MAX_SUPPORTED_RATES];
1358 char str[128]; /* FIXME: too big for stack? */ 1362 char str[128]; /* FIXME: too big for stack? */
1359 1363
1360 if ((drm_debug & DRM_UT_KMS) == 0) 1364 if ((drm_debug & DRM_UT_KMS) == 0)
1361 return; 1365 return;
1362 1366
1363 source_len = intel_dp_source_rates(dev, &source_rates); 1367 source_len = intel_dp_source_rates(dev, &source_rates);
1364 snprintf_int_array(str, sizeof(str), source_rates, source_len); 1368 snprintf_int_array(str, sizeof(str), source_rates, source_len);
1365 DRM_DEBUG_KMS("source rates: %s\n", str); 1369 DRM_DEBUG_KMS("source rates: %s\n", str);
1366 1370
1367 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates); 1371 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1368 snprintf_int_array(str, sizeof(str), sink_rates, sink_len); 1372 snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1369 DRM_DEBUG_KMS("sink rates: %s\n", str); 1373 DRM_DEBUG_KMS("sink rates: %s\n", str);
1370 1374
1371 common_len = intel_dp_common_rates(intel_dp, common_rates); 1375 common_len = intel_dp_common_rates(intel_dp, common_rates);
1372 snprintf_int_array(str, sizeof(str), common_rates, common_len); 1376 snprintf_int_array(str, sizeof(str), common_rates, common_len);
1373 DRM_DEBUG_KMS("common rates: %s\n", str); 1377 DRM_DEBUG_KMS("common rates: %s\n", str);
1374} 1378}
1375 1379
1376static int rate_to_index(int find, const int *rates) 1380static int rate_to_index(int find, const int *rates)
1377{ 1381{
1378 int i = 0; 1382 int i = 0;
1379 1383
1380 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i) 1384 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1381 if (find == rates[i]) 1385 if (find == rates[i])
1382 break; 1386 break;
1383 1387
1384 return i; 1388 return i;
1385} 1389}
1386 1390
1387int 1391int
1388intel_dp_max_link_rate(struct intel_dp *intel_dp) 1392intel_dp_max_link_rate(struct intel_dp *intel_dp)
1389{ 1393{
1390 int rates[DP_MAX_SUPPORTED_RATES] = {}; 1394 int rates[DP_MAX_SUPPORTED_RATES] = {};
1391 int len; 1395 int len;
1392 1396
1393 len = intel_dp_common_rates(intel_dp, rates); 1397 len = intel_dp_common_rates(intel_dp, rates);
1394 if (WARN_ON(len <= 0)) 1398 if (WARN_ON(len <= 0))
1395 return 162000; 1399 return 162000;
1396 1400
1397 return rates[rate_to_index(0, rates) - 1]; 1401 return rates[rate_to_index(0, rates) - 1];
1398} 1402}
1399 1403
1400int intel_dp_rate_select(struct intel_dp *intel_dp, int rate) 1404int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1401{ 1405{
1402 return rate_to_index(rate, intel_dp->sink_rates); 1406 return rate_to_index(rate, intel_dp->sink_rates);
1403} 1407}
1404 1408
1405static void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock, 1409static void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1406 uint8_t *link_bw, uint8_t *rate_select) 1410 uint8_t *link_bw, uint8_t *rate_select)
1407{ 1411{
1408 if (intel_dp->num_sink_rates) { 1412 if (intel_dp->num_sink_rates) {
1409 *link_bw = 0; 1413 *link_bw = 0;
1410 *rate_select = 1414 *rate_select =
1411 intel_dp_rate_select(intel_dp, port_clock); 1415 intel_dp_rate_select(intel_dp, port_clock);
1412 } else { 1416 } else {
1413 *link_bw = drm_dp_link_rate_to_bw_code(port_clock); 1417 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1414 *rate_select = 0; 1418 *rate_select = 0;
1415 } 1419 }
1416} 1420}
1417 1421
1418bool 1422bool
1419intel_dp_compute_config(struct intel_encoder *encoder, 1423intel_dp_compute_config(struct intel_encoder *encoder,
1420 struct intel_crtc_state *pipe_config) 1424 struct intel_crtc_state *pipe_config)
1421{ 1425{
1422 struct drm_device *dev = encoder->base.dev; 1426 struct drm_device *dev = encoder->base.dev;
1423 struct drm_i915_private *dev_priv = dev->dev_private; 1427 struct drm_i915_private *dev_priv = dev->dev_private;
1424 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 1428 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1425 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1429 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1426 enum port port = dp_to_dig_port(intel_dp)->port; 1430 enum port port = dp_to_dig_port(intel_dp)->port;
1427 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc); 1431 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
1428 struct intel_connector *intel_connector = intel_dp->attached_connector; 1432 struct intel_connector *intel_connector = intel_dp->attached_connector;
1429 int lane_count, clock; 1433 int lane_count, clock;
1430 int min_lane_count = 1; 1434 int min_lane_count = 1;
1431 int max_lane_count = intel_dp_max_lane_count(intel_dp); 1435 int max_lane_count = intel_dp_max_lane_count(intel_dp);
1432 /* Conveniently, the link BW constants become indices with a shift...*/ 1436 /* Conveniently, the link BW constants become indices with a shift...*/
1433 int min_clock = 0; 1437 int min_clock = 0;
1434 int max_clock; 1438 int max_clock;
1435 int bpp, mode_rate; 1439 int bpp, mode_rate;
1436 int link_avail, link_clock; 1440 int link_avail, link_clock;
1437 int common_rates[DP_MAX_SUPPORTED_RATES] = {}; 1441 int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1438 int common_len; 1442 int common_len;
1439 uint8_t link_bw, rate_select; 1443 uint8_t link_bw, rate_select;
1440 1444
1441 common_len = intel_dp_common_rates(intel_dp, common_rates); 1445 common_len = intel_dp_common_rates(intel_dp, common_rates);
1442 1446
1443 /* No common link rates between source and sink */ 1447 /* No common link rates between source and sink */
1444 WARN_ON(common_len <= 0); 1448 WARN_ON(common_len <= 0);
1445 1449
1446 max_clock = common_len - 1; 1450 max_clock = common_len - 1;
1447 1451
1448 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A) 1452 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
1449 pipe_config->has_pch_encoder = true; 1453 pipe_config->has_pch_encoder = true;
1450 1454
1451 pipe_config->has_dp_encoder = true; 1455 pipe_config->has_dp_encoder = true;
1452 pipe_config->has_drrs = false; 1456 pipe_config->has_drrs = false;
1453 pipe_config->has_audio = intel_dp->has_audio && port != PORT_A; 1457 pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
1454 1458
1455 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) { 1459 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1456 intel_fixed_panel_mode(intel_connector->panel.fixed_mode, 1460 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1457 adjusted_mode); 1461 adjusted_mode);
1458 1462
1459 if (INTEL_INFO(dev)->gen >= 9) { 1463 if (INTEL_INFO(dev)->gen >= 9) {
1460 int ret; 1464 int ret;
1461 ret = skl_update_scaler_crtc(pipe_config); 1465 ret = skl_update_scaler_crtc(pipe_config);
1462 if (ret) 1466 if (ret)
1463 return ret; 1467 return ret;
1464 } 1468 }
1465 1469
1466 if (!HAS_PCH_SPLIT(dev)) 1470 if (!HAS_PCH_SPLIT(dev))
1467 intel_gmch_panel_fitting(intel_crtc, pipe_config, 1471 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1468 intel_connector->panel.fitting_mode); 1472 intel_connector->panel.fitting_mode);
1469 else 1473 else
1470 intel_pch_panel_fitting(intel_crtc, pipe_config, 1474 intel_pch_panel_fitting(intel_crtc, pipe_config,
1471 intel_connector->panel.fitting_mode); 1475 intel_connector->panel.fitting_mode);
1472 } 1476 }
1473 1477
1474 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) 1478 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1475 return false; 1479 return false;
1476 1480
1477 DRM_DEBUG_KMS("DP link computation with max lane count %i " 1481 DRM_DEBUG_KMS("DP link computation with max lane count %i "
1478 "max bw %d pixel clock %iKHz\n", 1482 "max bw %d pixel clock %iKHz\n",
1479 max_lane_count, common_rates[max_clock], 1483 max_lane_count, common_rates[max_clock],
1480 adjusted_mode->crtc_clock); 1484 adjusted_mode->crtc_clock);
1481 1485
1482 /* Walk through all bpp values. Luckily they're all nicely spaced with 2 1486 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1483 * bpc in between. */ 1487 * bpc in between. */
1484 bpp = pipe_config->pipe_bpp; 1488 bpp = pipe_config->pipe_bpp;
1485 if (is_edp(intel_dp)) { 1489 if (is_edp(intel_dp)) {
1486 1490
1487 /* Get bpp from vbt only for panels that don't have bpp in edid */ 1491 /* Get bpp from vbt only for panels that don't have bpp in edid */
1488 if (intel_connector->base.display_info.bpc == 0 && 1492 if (intel_connector->base.display_info.bpc == 0 &&
1489 (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) { 1493 (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
1490 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n", 1494 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1491 dev_priv->vbt.edp_bpp); 1495 dev_priv->vbt.edp_bpp);
1492 bpp = dev_priv->vbt.edp_bpp; 1496 bpp = dev_priv->vbt.edp_bpp;
1493 } 1497 }
1494 1498
1495 /* 1499 /*
1496 * Use the maximum clock and number of lanes the eDP panel 1500 * Use the maximum clock and number of lanes the eDP panel
1497 * advertises being capable of. The panels are generally 1501 * advertises being capable of. The panels are generally
1498 * designed to support only a single clock and lane 1502 * designed to support only a single clock and lane
1499 * configuration, and typically these values correspond to the 1503 * configuration, and typically these values correspond to the
1500 * native resolution of the panel. 1504 * native resolution of the panel.
1501 */ 1505 */
1502 min_lane_count = max_lane_count; 1506 min_lane_count = max_lane_count;
1503 min_clock = max_clock; 1507 min_clock = max_clock;
1504 } 1508 }
1505 1509
1506 for (; bpp >= 6*3; bpp -= 2*3) { 1510 for (; bpp >= 6*3; bpp -= 2*3) {
1507 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock, 1511 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1508 bpp); 1512 bpp);
1509 1513
1510 for (clock = min_clock; clock <= max_clock; clock++) { 1514 for (clock = min_clock; clock <= max_clock; clock++) {
1511 for (lane_count = min_lane_count; 1515 for (lane_count = min_lane_count;
1512 lane_count <= max_lane_count; 1516 lane_count <= max_lane_count;
1513 lane_count <<= 1) { 1517 lane_count <<= 1) {
1514 1518
1515 link_clock = common_rates[clock]; 1519 link_clock = common_rates[clock];
1516 link_avail = intel_dp_max_data_rate(link_clock, 1520 link_avail = intel_dp_max_data_rate(link_clock,
1517 lane_count); 1521 lane_count);
1518 1522
1519 if (mode_rate <= link_avail) { 1523 if (mode_rate <= link_avail) {
1520 goto found; 1524 goto found;
1521 } 1525 }
1522 } 1526 }
1523 } 1527 }
1524 } 1528 }
1525 1529
1526 return false; 1530 return false;
1527 1531
1528found: 1532found:
1529 if (intel_dp->color_range_auto) { 1533 if (intel_dp->color_range_auto) {
1530 /* 1534 /*
1531 * See: 1535 * See:
1532 * CEA-861-E - 5.1 Default Encoding Parameters 1536 * CEA-861-E - 5.1 Default Encoding Parameters
1533 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry 1537 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1534 */ 1538 */
1535 pipe_config->limited_color_range = 1539 pipe_config->limited_color_range =
1536 bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1; 1540 bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
1537 } else { 1541 } else {
1538 pipe_config->limited_color_range = 1542 pipe_config->limited_color_range =
1539 intel_dp->limited_color_range; 1543 intel_dp->limited_color_range;
1540 } 1544 }
1541 1545
1542 pipe_config->lane_count = lane_count; 1546 pipe_config->lane_count = lane_count;
1543 1547
1544 pipe_config->pipe_bpp = bpp; 1548 pipe_config->pipe_bpp = bpp;
1545 pipe_config->port_clock = common_rates[clock]; 1549 pipe_config->port_clock = common_rates[clock];
1546 1550
1547 intel_dp_compute_rate(intel_dp, pipe_config->port_clock, 1551 intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1548 &link_bw, &rate_select); 1552 &link_bw, &rate_select);
1549 1553
1550 DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n", 1554 DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1551 link_bw, rate_select, pipe_config->lane_count, 1555 link_bw, rate_select, pipe_config->lane_count,
1552 pipe_config->port_clock, bpp); 1556 pipe_config->port_clock, bpp);
1553 DRM_DEBUG_KMS("DP link bw required %i available %i\n", 1557 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1554 mode_rate, link_avail); 1558 mode_rate, link_avail);
1555 1559
1556 intel_link_compute_m_n(bpp, lane_count, 1560 intel_link_compute_m_n(bpp, lane_count,
1557 adjusted_mode->crtc_clock, 1561 adjusted_mode->crtc_clock,
1558 pipe_config->port_clock, 1562 pipe_config->port_clock,
1559 &pipe_config->dp_m_n); 1563 &pipe_config->dp_m_n);
1560 1564
1561 if (intel_connector->panel.downclock_mode != NULL && 1565 if (intel_connector->panel.downclock_mode != NULL &&
1562 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) { 1566 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1563 pipe_config->has_drrs = true; 1567 pipe_config->has_drrs = true;
1564 intel_link_compute_m_n(bpp, lane_count, 1568 intel_link_compute_m_n(bpp, lane_count,
1565 intel_connector->panel.downclock_mode->clock, 1569 intel_connector->panel.downclock_mode->clock,
1566 pipe_config->port_clock, 1570 pipe_config->port_clock,
1567 &pipe_config->dp_m2_n2); 1571 &pipe_config->dp_m2_n2);
1568 } 1572 }
1569 1573
1570 if (IS_SKYLAKE(dev) && is_edp(intel_dp)) 1574 if (IS_SKYLAKE(dev) && is_edp(intel_dp))
1571 skl_edp_set_pll_config(pipe_config); 1575 skl_edp_set_pll_config(pipe_config);
1572 else if (IS_BROXTON(dev)) 1576 else if (IS_BROXTON(dev))
1573 /* handled in ddi */; 1577 /* handled in ddi */;
1574 else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 1578 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1575 hsw_dp_set_ddi_pll_sel(pipe_config); 1579 hsw_dp_set_ddi_pll_sel(pipe_config);
1576 else 1580 else
1577 intel_dp_set_clock(encoder, pipe_config); 1581 intel_dp_set_clock(encoder, pipe_config);
1578 1582
1579 return true; 1583 return true;
1580} 1584}
1581 1585
1582static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp) 1586static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
1583{ 1587{
1584 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 1588 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1585 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); 1589 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1586 struct drm_device *dev = crtc->base.dev; 1590 struct drm_device *dev = crtc->base.dev;
1587 struct drm_i915_private *dev_priv = dev->dev_private; 1591 struct drm_i915_private *dev_priv = dev->dev_private;
1588 u32 dpa_ctl; 1592 u32 dpa_ctl;
1589 1593
1590 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", 1594 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1591 crtc->config->port_clock); 1595 crtc->config->port_clock);
1592 dpa_ctl = I915_READ(DP_A); 1596 dpa_ctl = I915_READ(DP_A);
1593 dpa_ctl &= ~DP_PLL_FREQ_MASK; 1597 dpa_ctl &= ~DP_PLL_FREQ_MASK;
1594 1598
1595 if (crtc->config->port_clock == 162000) { 1599 if (crtc->config->port_clock == 162000) {
1596 /* For a long time we've carried around an ILK-DevA w/a for the 1600 /* For a long time we've carried around an ILK-DevA w/a for the
1597 * 160MHz clock. If we're really unlucky, it's still required. 1601 * 160MHz clock. If we're really unlucky, it's still required.
1598 */ 1602 */
1599 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n"); 1603 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
1600 dpa_ctl |= DP_PLL_FREQ_160MHZ; 1604 dpa_ctl |= DP_PLL_FREQ_160MHZ;
1601 intel_dp->DP |= DP_PLL_FREQ_160MHZ; 1605 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
1602 } else { 1606 } else {
1603 dpa_ctl |= DP_PLL_FREQ_270MHZ; 1607 dpa_ctl |= DP_PLL_FREQ_270MHZ;
1604 intel_dp->DP |= DP_PLL_FREQ_270MHZ; 1608 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
1605 } 1609 }
1606 1610
1607 I915_WRITE(DP_A, dpa_ctl); 1611 I915_WRITE(DP_A, dpa_ctl);
1608 1612
1609 POSTING_READ(DP_A); 1613 POSTING_READ(DP_A);
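
As an aside on the link-rate handling in the intel_dp.c code above: intersect_rates() walks the two ascending-sorted rate lists with a pair of indices, and intel_dp_max_link_rate() then exploits the zero-initialized common_rates array, so rate_to_index(0, rates) yields the count of valid entries and rates[count - 1] is the highest common rate. The following standalone C sketch (hypothetical names, not part of the diff) shows the same two-pointer intersection, under the assumption that both input arrays are sorted in ascending order:

#include <stdio.h>

#define MAX_COMMON_RATES 8	/* stand-in for DP_MAX_SUPPORTED_RATES */

/* Intersect two ascending-sorted rate lists; returns the number of matches. */
static int
intersect_sorted_rates(const int *src, int src_len,
    const int *sink, int sink_len, int *common)
{
	int i = 0, j = 0, k = 0;

	while (i < src_len && j < sink_len) {
		if (src[i] == sink[j]) {
			if (k < MAX_COMMON_RATES)
				common[k++] = src[i];
			i++;
			j++;
		} else if (src[i] < sink[j]) {
			i++;		/* source rate is lower; advance it */
		} else {
			j++;		/* sink rate is lower; advance it */
		}
	}
	return k;
}

int
main(void)
{
	/* example link rates in kHz, both lists sorted ascending */
	const int source[] = { 162000, 216000, 270000, 324000, 432000, 540000 };
	const int sink[] = { 162000, 270000, 540000 };
	int common[MAX_COMMON_RATES] = { 0 };
	int i, n;

	n = intersect_sorted_rates(source,
	    (int)(sizeof(source)/sizeof(source[0])),
	    sink, (int)(sizeof(sink)/sizeof(sink[0])), common);
	for (i = 0; i < n; i++)
		printf("common rate: %d kHz\n", common[i]);
	printf("highest common rate: %d kHz\n", n > 0 ? common[n - 1] : 0);
	return 0;
}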

cvs diff -r1.4 -r1.5 src/sys/external/bsd/drm2/include/drm/drm_irq_netbsd.h

--- src/sys/external/bsd/drm2/include/drm/drm_irq_netbsd.h 2015/01/01 01:15:42 1.4
+++ src/sys/external/bsd/drm2/include/drm/drm_irq_netbsd.h 2018/08/27 06:16:01 1.5
@@ -1,46 +1,48 @@ @@ -1,46 +1,48 @@
1/* $NetBSD: drm_irq_netbsd.h,v 1.4 2015/01/01 01:15:42 mrg Exp $ */ 1/* $NetBSD: drm_irq_netbsd.h,v 1.5 2018/08/27 06:16:01 riastradh Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2013 The NetBSD Foundation, Inc. 4 * Copyright (c) 2013 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Taylor R. Campbell. 8 * by Taylor R. Campbell.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright 15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the 16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution. 17 * documentation and/or other materials provided with the distribution.
18 * 18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32#ifndef _DRM_DRM_IRQ_NETBSD_H_ 32#ifndef _DRM_DRM_IRQ_NETBSD_H_
33#define _DRM_DRM_IRQ_NETBSD_H_ 33#define _DRM_DRM_IRQ_NETBSD_H_
34 34
35typedef int irqreturn_t; 35typedef int irqreturn_t;
36 36
37#define IRQ_NONE 0 37enum irqreturn {
38#define IRQ_HANDLED 1 38 IRQ_NONE = 0x0,
 39 IRQ_HANDLED = 0x1,
 40};
39 41
40#define DRM_IRQ_ARGS void *arg 42#define DRM_IRQ_ARGS void *arg
41 43
42#define IRQF_SHARED 0 /* XXX */ 44#define IRQF_SHARED 0 /* XXX */
43 45
44#define IPL_DRM IPL_TTY /* XXX */ 46#define IPL_DRM IPL_TTY /* XXX */
45 47
46#endif /* _DRM_DRM_IRQ_NETBSD_H_ */ 48#endif /* _DRM_DRM_IRQ_NETBSD_H_ */
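
The enum added above mirrors the Linux irqreturn convention while keeping the existing typedef int irqreturn_t: a handler on a shared interrupt line returns IRQ_NONE when its device did not raise the interrupt and IRQ_HANDLED when it did, and either value passes through irqreturn_t or enum irqreturn unchanged since both are plain ints. Below is a minimal self-contained sketch of that convention; the typedef and enum are restated so it compiles on its own, and the device structure and handler names are hypothetical, for illustration only:

#include <stdio.h>

typedef int irqreturn_t;

enum irqreturn {
	IRQ_NONE = 0x0,
	IRQ_HANDLED = 0x1,
};

/* hypothetical per-device state, standing in for a driver softc */
struct fake_device {
	int intr_pending;	/* nonzero when this device raised the interrupt */
};

/* hypothetical handler following the shared-interrupt convention */
static irqreturn_t
fake_device_intr(void *arg)
{
	struct fake_device *dev = arg;

	if (!dev->intr_pending)
		return IRQ_NONE;	/* not ours; let other handlers try */

	dev->intr_pending = 0;		/* "acknowledge" the interrupt */
	return IRQ_HANDLED;
}

int
main(void)
{
	struct fake_device dev = { .intr_pending = 1 };

	printf("first call returns %d (IRQ_HANDLED expected)\n",
	    fake_device_intr(&dev));
	printf("second call returns %d (IRQ_NONE expected)\n",
	    fake_device_intr(&dev));
	return 0;
}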