@@ -164,1882 +164,1878 @@ void i915_kernel_lost_context(struct drm
}

static int i915_dma_cleanup(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i;

        /* Make sure interrupts are disabled here because the uninstall ioctl
         * may not have been called from userspace and after dev_private
         * is freed, it's too late.
         */
        if (dev->irq_enabled)
                drm_irq_uninstall(dev);

        mutex_lock(&dev->struct_mutex);
        for (i = 0; i < I915_NUM_RINGS; i++)
                intel_cleanup_ring_buffer(&dev_priv->ring[i]);
        mutex_unlock(&dev->struct_mutex);

        /* Clear the HWS virtual address at teardown */
        if (I915_NEED_GFX_HWS(dev))
                i915_free_hws(dev);

        return 0;
}

static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
        int ret;

        master_priv->sarea = drm_getsarea(dev);
        if (master_priv->sarea) {
                master_priv->sarea_priv = (drm_i915_sarea_t *)
                        ((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
        } else {
                DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
        }

        if (init->ring_size != 0) {
                if (LP_RING(dev_priv)->obj != NULL) {
                        i915_dma_cleanup(dev);
                        DRM_ERROR("Client tried to initialize ringbuffer in "
                                  "GEM mode\n");
                        return -EINVAL;
                }

                ret = intel_render_ring_init_dri(dev,
                                                 init->ring_start,
                                                 init->ring_size);
                if (ret) {
                        i915_dma_cleanup(dev);
                        return ret;
                }
        }

        dev_priv->dri1.cpp = init->cpp;
        dev_priv->dri1.back_offset = init->back_offset;
        dev_priv->dri1.front_offset = init->front_offset;
        dev_priv->dri1.current_page = 0;
        if (master_priv->sarea_priv)
                master_priv->sarea_priv->pf_current_page = 0;

        /* Allow hardware batchbuffers unless told otherwise.
         */
        dev_priv->dri1.allow_batchbuffer = 1;

        return 0;
}

static int i915_dma_resume(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        struct intel_ring_buffer *ring = LP_RING(dev_priv);

        DRM_DEBUG_DRIVER("%s\n", __func__);

#ifdef __NetBSD__
        if (!ring->virtual_start_mapped) {
                DRM_ERROR("can not ioremap virtual address for"
                          " ring buffer\n");
                return -ENOMEM;
        }
#else
        if (ring->virtual_start == NULL) {
                DRM_ERROR("can not ioremap virtual address for"
                          " ring buffer\n");
                return -ENOMEM;
        }
#endif

        /* Program Hardware Status Page */
        if (!ring->status_page.page_addr) {
                DRM_ERROR("Can not find hardware status page\n");
                return -EINVAL;
        }
        DRM_DEBUG_DRIVER("hw status page @ %p\n",
                         ring->status_page.page_addr);
        if (ring->status_page.gfx_addr != 0)
                intel_ring_setup_status_page(ring);
        else
                i915_write_hws_pga(dev);

        DRM_DEBUG_DRIVER("Enabled hardware status page\n");

        return 0;
}

static int i915_dma_init(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        drm_i915_init_t *init = data;
        int retcode = 0;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        switch (init->func) {
        case I915_INIT_DMA:
                retcode = i915_initialize(dev, init);
                break;
        case I915_CLEANUP_DMA:
                retcode = i915_dma_cleanup(dev);
                break;
        case I915_RESUME_DMA:
                retcode = i915_dma_resume(dev);
                break;
        default:
                retcode = -EINVAL;
                break;
        }

        return retcode;
}

/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE. These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction. It's important to get the size right as
 * that tells us where the next instruction to check is. Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
static int validate_cmd(int cmd)
{
        switch (((cmd >> 29) & 0x7)) {
        case 0x0:
                switch ((cmd >> 23) & 0x3f) {
                case 0x0:
                        return 1;       /* MI_NOOP */
                case 0x4:
                        return 1;       /* MI_FLUSH */
                default:
                        return 0;       /* disallow everything else */
                }
                break;
        case 0x1:
                return 0;       /* reserved */
        case 0x2:
                return (cmd & 0xff) + 2;        /* 2d commands */
        case 0x3:
                if (((cmd >> 24) & 0x1f) <= 0x18)
                        return 1;

                switch ((cmd >> 24) & 0x1f) {
                case 0x1c:
                        return 1;
                case 0x1d:
                        switch ((cmd >> 16) & 0xff) {
                        case 0x3:
                                return (cmd & 0x1f) + 2;
                        case 0x4:
                                return (cmd & 0xf) + 2;
                        default:
                                return (cmd & 0xffff) + 2;
                        }
                case 0x1e:
                        if (cmd & (1 << 23))
                                return (cmd & 0xffff) + 1;
                        else
                                return 1;
                case 0x1f:
                        if ((cmd & (1 << 23)) == 0)     /* inline vertices */
                                return (cmd & 0x1ffff) + 2;
                        else if (cmd & (1 << 17))       /* indirect random */
                                if ((cmd & 0xffff) == 0)
                                        return 0;       /* unknown length, too hard */
                                else
                                        return (((cmd & 0xffff) + 1) / 2) + 1;
                        else
                                return 2;       /* indirect sequential */
                default:
                        return 0;
                }
        default:
                return 0;
        }

        return 0;
}

static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i, ret;

        if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
                return -EINVAL;

        for (i = 0; i < dwords;) {
                int sz = validate_cmd(buffer[i]);
                if (sz == 0 || i + sz > dwords)
                        return -EINVAL;
                i += sz;
        }

        ret = BEGIN_LP_RING((dwords+1)&~1);
        if (ret)
                return ret;

        for (i = 0; i < dwords; i++)
                OUT_RING(buffer[i]);
        if (dwords & 1)
                OUT_RING(0);

        ADVANCE_LP_RING();

        return 0;
}

int
i915_emit_box(struct drm_device *dev,
              struct drm_clip_rect *box,
              int DR1, int DR4)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
            box->y2 <= 0 || box->x2 <= 0) {
                DRM_ERROR("Bad box %d,%d..%d,%d\n",
                          box->x1, box->y1, box->x2, box->y2);
                return -EINVAL;
        }

        if (INTEL_INFO(dev)->gen >= 4) {
                ret = BEGIN_LP_RING(4);
                if (ret)
                        return ret;

                OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
                OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
                OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
                OUT_RING(DR4);
        } else {
                ret = BEGIN_LP_RING(6);
                if (ret)
                        return ret;

                OUT_RING(GFX_OP_DRAWRECT_INFO);
                OUT_RING(DR1);
                OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
                OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
                OUT_RING(DR4);
                OUT_RING(0);
        }
        ADVANCE_LP_RING();

        return 0;
}

/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit. For now, do it in both places:
 */

static void i915_emit_breadcrumb(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

        dev_priv->dri1.counter++;
        if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
                dev_priv->dri1.counter = 0;
        if (master_priv->sarea_priv)
                master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;

        if (BEGIN_LP_RING(4) == 0) {
                OUT_RING(MI_STORE_DWORD_INDEX);
                OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
                OUT_RING(dev_priv->dri1.counter);
                OUT_RING(0);
                ADVANCE_LP_RING();
        }
}

static int i915_dispatch_cmdbuffer(struct drm_device * dev,
                                   drm_i915_cmdbuffer_t *cmd,
                                   struct drm_clip_rect *cliprects,
                                   void *cmdbuf)
{
        int nbox = cmd->num_cliprects;
        int i = 0, count, ret;

        if (cmd->sz & 0x3) {
                DRM_ERROR("alignment");
                return -EINVAL;
        }

        i915_kernel_lost_context(dev);

        count = nbox ? nbox : 1;

        for (i = 0; i < count; i++) {
                if (i < nbox) {
                        ret = i915_emit_box(dev, &cliprects[i],
                                            cmd->DR1, cmd->DR4);
                        if (ret)
                                return ret;
                }

                ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
                if (ret)
                        return ret;
        }

        i915_emit_breadcrumb(dev);
        return 0;
}

static int i915_dispatch_batchbuffer(struct drm_device * dev,
                                     drm_i915_batchbuffer_t * batch,
                                     struct drm_clip_rect *cliprects)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int nbox = batch->num_cliprects;
        int i, count, ret;

        if ((batch->start | batch->used) & 0x7) {
                DRM_ERROR("alignment");
                return -EINVAL;
        }

        i915_kernel_lost_context(dev);

        count = nbox ? nbox : 1;
        for (i = 0; i < count; i++) {
                if (i < nbox) {
                        ret = i915_emit_box(dev, &cliprects[i],
                                            batch->DR1, batch->DR4);
                        if (ret)
                                return ret;
                }

                if (!IS_I830(dev) && !IS_845G(dev)) {
                        ret = BEGIN_LP_RING(2);
                        if (ret)
                                return ret;

                        if (INTEL_INFO(dev)->gen >= 4) {
                                OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
                                OUT_RING(batch->start);
                        } else {
                                OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
                                OUT_RING(batch->start | MI_BATCH_NON_SECURE);
                        }
                } else {
                        ret = BEGIN_LP_RING(4);
                        if (ret)
                                return ret;

                        OUT_RING(MI_BATCH_BUFFER);
                        OUT_RING(batch->start | MI_BATCH_NON_SECURE);
                        OUT_RING(batch->start + batch->used - 4);
                        OUT_RING(0);
                }
                ADVANCE_LP_RING();
        }


        if (IS_G4X(dev) || IS_GEN5(dev)) {
                if (BEGIN_LP_RING(2) == 0) {
                        OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
                        OUT_RING(MI_NOOP);
                        ADVANCE_LP_RING();
                }
        }

        i915_emit_breadcrumb(dev);
        return 0;
}

static int i915_dispatch_flip(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv =
                dev->primary->master->driver_priv;
        int ret;

        if (!master_priv->sarea_priv)
                return -EINVAL;

        DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
                         __func__,
                         dev_priv->dri1.current_page,
                         master_priv->sarea_priv->pf_current_page);

        i915_kernel_lost_context(dev);

        ret = BEGIN_LP_RING(10);
        if (ret)
                return ret;

        OUT_RING(MI_FLUSH | MI_READ_FLUSH);
        OUT_RING(0);

        OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
        OUT_RING(0);
        if (dev_priv->dri1.current_page == 0) {
                OUT_RING(dev_priv->dri1.back_offset);
                dev_priv->dri1.current_page = 1;
        } else {
                OUT_RING(dev_priv->dri1.front_offset);
                dev_priv->dri1.current_page = 0;
        }
        OUT_RING(0);

        OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
        OUT_RING(0);

        ADVANCE_LP_RING();

        master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter++;

        if (BEGIN_LP_RING(4) == 0) {
                OUT_RING(MI_STORE_DWORD_INDEX);
                OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
                OUT_RING(dev_priv->dri1.counter);
                OUT_RING(0);
                ADVANCE_LP_RING();
        }

        master_priv->sarea_priv->pf_current_page = dev_priv->dri1.current_page;
        return 0;
}

static int i915_quiescent(struct drm_device *dev)
{
        i915_kernel_lost_context(dev);
        return intel_ring_idle(LP_RING(dev->dev_private));
}

static int i915_flush_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
        int ret;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

        mutex_lock(&dev->struct_mutex);
        ret = i915_quiescent(dev);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}

static int i915_batchbuffer(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
        drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
                master_priv->sarea_priv;
        drm_i915_batchbuffer_t *batch = data;
        int ret;
        struct drm_clip_rect *cliprects = NULL;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        if (!dev_priv->dri1.allow_batchbuffer) {
                DRM_ERROR("Batchbuffer ioctl disabled\n");
                return -EINVAL;
        }

        DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
                         batch->start, batch->used, batch->num_cliprects);

        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

        if (batch->num_cliprects < 0)
                return -EINVAL;

        if (batch->num_cliprects) {
                cliprects = kcalloc(batch->num_cliprects,
                                    sizeof(struct drm_clip_rect),
                                    GFP_KERNEL);
                if (cliprects == NULL)
                        return -ENOMEM;

                ret = copy_from_user(cliprects, batch->cliprects,
                                     batch->num_cliprects *
                                     sizeof(struct drm_clip_rect));
                if (ret != 0) {
                        ret = -EFAULT;
                        goto fail_free;
                }
        }

        mutex_lock(&dev->struct_mutex);
        ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
        mutex_unlock(&dev->struct_mutex);

        if (sarea_priv)
                sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_free:
        kfree(cliprects);

        return ret;
}

static int i915_cmdbuffer(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
        drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
                master_priv->sarea_priv;
        drm_i915_cmdbuffer_t *cmdbuf = data;
        struct drm_clip_rect *cliprects = NULL;
        void *batch_data;
        int ret;

        DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
                         cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

        if (cmdbuf->num_cliprects < 0)
                return -EINVAL;

        batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL);
        if (batch_data == NULL)
                return -ENOMEM;

        ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
        if (ret != 0) {
                ret = -EFAULT;
                goto fail_batch_free;
        }

        if (cmdbuf->num_cliprects) {
                cliprects = kcalloc(cmdbuf->num_cliprects,
                                    sizeof(struct drm_clip_rect), GFP_KERNEL);
                if (cliprects == NULL) {
                        ret = -ENOMEM;
                        goto fail_batch_free;
                }

                ret = copy_from_user(cliprects, cmdbuf->cliprects,
                                     cmdbuf->num_cliprects *
                                     sizeof(struct drm_clip_rect));
                if (ret != 0) {
                        ret = -EFAULT;
                        goto fail_clip_free;
                }
        }

        mutex_lock(&dev->struct_mutex);
        ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
        mutex_unlock(&dev->struct_mutex);
        if (ret) {
                DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
                goto fail_clip_free;
        }

        if (sarea_priv)
                sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_clip_free:
        kfree(cliprects);
fail_batch_free:
        kfree(batch_data);

        return ret;
}

static int i915_emit_irq(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

        i915_kernel_lost_context(dev);

        DRM_DEBUG_DRIVER("\n");

        dev_priv->dri1.counter++;
        if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
                dev_priv->dri1.counter = 1;
        if (master_priv->sarea_priv)
                master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;

        if (BEGIN_LP_RING(4) == 0) {
                OUT_RING(MI_STORE_DWORD_INDEX);
                OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
                OUT_RING(dev_priv->dri1.counter);
                OUT_RING(MI_USER_INTERRUPT);
                ADVANCE_LP_RING();
        }

        return dev_priv->dri1.counter;
}

static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
        int ret = 0;
        struct intel_ring_buffer *ring = LP_RING(dev_priv);

        DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
                         READ_BREADCRUMB(dev_priv));

        if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
                if (master_priv->sarea_priv)
                        master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
                return 0;
        }

        if (master_priv->sarea_priv)
                master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

        if (ring->irq_get(ring)) {
#ifdef __NetBSD__
                DRM_TIMED_WAIT_UNTIL(ret, &ring->irq_queue, &drm_global_mutex,
                                     3 * DRM_HZ,
                                     READ_BREADCRUMB(dev_priv) >= irq_nr);
#else
                DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
                            READ_BREADCRUMB(dev_priv) >= irq_nr);
#endif
                ring->irq_put(ring);
        } else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
                ret = -EBUSY;

        if (ret == -EBUSY) {
                DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
                          READ_BREADCRUMB(dev_priv), (int)dev_priv->dri1.counter);
        }

        return ret;
}

/* Needs the lock as it touches the ring.
 */
static int i915_irq_emit(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_irq_emit_t *emit = data;
        int result;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

#ifdef __NetBSD__
        if (!dev_priv || !LP_RING(dev_priv)->virtual_start_mapped) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }
#else
        if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }
#endif

        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

        mutex_lock(&dev->struct_mutex);
        result = i915_emit_irq(dev);
        mutex_unlock(&dev->struct_mutex);

        if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
                DRM_ERROR("copy_to_user\n");
                return -EFAULT;
        }

        return 0;
}

/* Doesn't need the hardware lock.
 */
static int i915_irq_wait(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_irq_wait_t *irqwait = data;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        return i915_wait_irq(dev, irqwait->irq_seq);
}

static int i915_vblank_pipe_get(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_vblank_pipe_t *pipe = data;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

        return 0;
}

/**
 * Schedule buffer swap at given vertical blank.
 */
static int i915_vblank_swap(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
        /* The delayed swap mechanism was fundamentally racy, and has been
         * removed. The model was that the client requested a delayed flip/swap
         * from the kernel, then waited for vblank before continuing to perform
         * rendering. The problem was that the kernel might wake the client
         * up before it dispatched the vblank swap (since the lock has to be
         * held while touching the ringbuffer), in which case the client would
         * clear and start the next frame before the swap occurred, and
         * flicker would occur in addition to likely missing the vblank.
         *
         * In the absence of this ioctl, userland falls back to a correct path
         * of waiting for a vblank, then dispatching the swap on its own.
         * Context switching to userland and back is plenty fast enough for
         * meeting the requirements of vblank swapping.
         */
        return -EINVAL;
}

static int i915_flip_bufs(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        int ret;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        DRM_DEBUG_DRIVER("%s\n", __func__);

        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

        mutex_lock(&dev->struct_mutex);
        ret = i915_dispatch_flip(dev);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}

static int i915_getparam(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_getparam_t *param = data;
        int value;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        switch (param->param) {
        case I915_PARAM_IRQ_ACTIVE:
#ifdef __NetBSD__
                /* XXX This is the old code; why was it changed upstream? */
                value = dev->irq_enabled ? 1 : 0;
#else
                value = dev->pdev->irq ? 1 : 0;
#endif
                break;
        case I915_PARAM_ALLOW_BATCHBUFFER:
                value = dev_priv->dri1.allow_batchbuffer ? 1 : 0;
                break;
        case I915_PARAM_LAST_DISPATCH:
                value = READ_BREADCRUMB(dev_priv);
                break;
        case I915_PARAM_CHIPSET_ID:
                value = dev->pci_device;
                break;
        case I915_PARAM_HAS_GEM:
                value = 1;
                break;
        case I915_PARAM_NUM_FENCES_AVAIL:
                value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
                break;
        case I915_PARAM_HAS_OVERLAY:
                value = dev_priv->overlay ? 1 : 0;
                break;
        case I915_PARAM_HAS_PAGEFLIPPING:
                value = 1;
                break;
        case I915_PARAM_HAS_EXECBUF2:
                /* depends on GEM */
                value = 1;
                break;
        case I915_PARAM_HAS_BSD:
                value = intel_ring_initialized(&dev_priv->ring[VCS]);
                break;
        case I915_PARAM_HAS_BLT:
                value = intel_ring_initialized(&dev_priv->ring[BCS]);
                break;
        case I915_PARAM_HAS_RELAXED_FENCING:
                value = 1;
                break;
        case I915_PARAM_HAS_COHERENT_RINGS:
                value = 1;
                break;
        case I915_PARAM_HAS_EXEC_CONSTANTS:
                value = INTEL_INFO(dev)->gen >= 4;
                break;
        case I915_PARAM_HAS_RELAXED_DELTA:
                value = 1;
                break;
        case I915_PARAM_HAS_GEN7_SOL_RESET:
                value = 1;
                break;
        case I915_PARAM_HAS_LLC:
                value = HAS_LLC(dev);
                break;
        case I915_PARAM_HAS_ALIASING_PPGTT:
                value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
                break;
        case I915_PARAM_HAS_WAIT_TIMEOUT:
                value = 1;
                break;
        case I915_PARAM_HAS_SEMAPHORES:
                value = i915_semaphore_is_enabled(dev);
                break;
        case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
                value = 1;
                break;
        case I915_PARAM_HAS_SECURE_BATCHES:
#ifdef __NetBSD__
                value = DRM_SUSER();
#else
                value = capable(CAP_SYS_ADMIN);
#endif
                break;
        case I915_PARAM_HAS_PINNED_BATCHES:
                value = 1;
                break;
        default:
                DRM_DEBUG_DRIVER("Unknown parameter %d\n",
                                 param->param);
                return -EINVAL;
        }

        if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
                DRM_ERROR("DRM_COPY_TO_USER failed\n");
                return -EFAULT;
        }

        return 0;
}

static int i915_setparam(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_setparam_t *param = data;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        switch (param->param) {
        case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
                break;
        case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
                break;
        case I915_SETPARAM_ALLOW_BATCHBUFFER:
                dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0;
                break;
        case I915_SETPARAM_NUM_USED_FENCES:
                if (param->value > dev_priv->num_fence_regs ||
                    param->value < 0)
                        return -EINVAL;
                /* Userspace can use first N regs */
                dev_priv->fence_reg_start = param->value;
                break;
        default:
                DRM_DEBUG_DRIVER("unknown parameter %d\n",
                                 param->param);
                return -EINVAL;
        }

        return 0;
}

1084 | static int i915_set_status_page(struct drm_device *dev, void *data, | | 1084 | static int i915_set_status_page(struct drm_device *dev, void *data, |
1085 | struct drm_file *file_priv) | | 1085 | struct drm_file *file_priv) |
1086 | { | | 1086 | { |
1087 | drm_i915_private_t *dev_priv = dev->dev_private; | | 1087 | drm_i915_private_t *dev_priv = dev->dev_private; |
1088 | drm_i915_hws_addr_t *hws = data; | | 1088 | drm_i915_hws_addr_t *hws = data; |
1089 | struct intel_ring_buffer *ring; | | 1089 | struct intel_ring_buffer *ring; |
1090 | #ifdef __NetBSD__ | | 1090 | #ifdef __NetBSD__ |
1091 | struct drm_local_map *const gfx_hws_cpu_map = | | 1091 | struct drm_local_map *const gfx_hws_cpu_map = |
1092 | &dev_priv->dri1.gfx_hws_cpu_map; | | 1092 | &dev_priv->dri1.gfx_hws_cpu_map; |
1093 | #endif | | 1093 | #endif |
1094 | | | 1094 | |
1095 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | | 1095 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
1096 | return -ENODEV; | | 1096 | return -ENODEV; |
1097 | | | 1097 | |
1098 | if (!I915_NEED_GFX_HWS(dev)) | | 1098 | if (!I915_NEED_GFX_HWS(dev)) |
1099 | return -EINVAL; | | 1099 | return -EINVAL; |
1100 | | | 1100 | |
1101 | if (!dev_priv) { | | 1101 | if (!dev_priv) { |
1102 | DRM_ERROR("called with no initialization\n"); | | 1102 | DRM_ERROR("called with no initialization\n"); |
1103 | return -EINVAL; | | 1103 | return -EINVAL; |
1104 | } | | 1104 | } |
1105 | | | 1105 | |
1106 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | | 1106 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
1107 | WARN(1, "tried to set status page when mode setting active\n"); | | 1107 | WARN(1, "tried to set status page when mode setting active\n"); |
1108 | return 0; | | 1108 | return 0; |
1109 | } | | 1109 | } |
1110 | | | 1110 | |
1111 | DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr); | | 1111 | DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr); |
1112 | | | 1112 | |
1113 | ring = LP_RING(dev_priv); | | 1113 | ring = LP_RING(dev_priv); |
1114 | ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12); | | 1114 | ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12); |
1115 | | | 1115 | |
1116 | #ifdef __NetBSD__ | | 1116 | #ifdef __NetBSD__ |
1117 | gfx_hws_cpu_map->offset = (dev_priv->mm.gtt_base_addr + | | 1117 | gfx_hws_cpu_map->offset = (dev_priv->mm.gtt_base_addr + |
1118 | hws->addr); | | 1118 | hws->addr); |
1119 | gfx_hws_cpu_map->size = 4096; | | 1119 | gfx_hws_cpu_map->size = 4096; |
1120 | gfx_hws_cpu_map->flags = 0; | | 1120 | gfx_hws_cpu_map->flags = 0; |
1121 | gfx_hws_cpu_map->flags |= _DRM_RESTRICTED; | | 1121 | gfx_hws_cpu_map->flags |= _DRM_RESTRICTED; |
1122 | gfx_hws_cpu_map->flags |= _DRM_KERNEL; | | 1122 | gfx_hws_cpu_map->flags |= _DRM_KERNEL; |
1123 | gfx_hws_cpu_map->flags |= _DRM_WRITE_COMBINING; | | 1123 | gfx_hws_cpu_map->flags |= _DRM_WRITE_COMBINING; |
1124 | gfx_hws_cpu_map->flags |= _DRM_DRIVER; | | 1124 | gfx_hws_cpu_map->flags |= _DRM_DRIVER; |
1125 | if (drm_ioremap(dev, gfx_hws_cpu_map) == NULL) { | | 1125 | if (drm_ioremap(dev, gfx_hws_cpu_map) == NULL) { |
1126 | i915_dma_cleanup(dev); | | 1126 | i915_dma_cleanup(dev); |
1127 | ring->status_page.gfx_addr = 0; | | 1127 | ring->status_page.gfx_addr = 0; |
1128 | DRM_ERROR("can not ioremap virtual address for" | | 1128 | DRM_ERROR("can not ioremap virtual address for" |
1129 | " G33 hw status page\n"); | | 1129 | " G33 hw status page\n"); |
1130 | return -ENOMEM; | | 1130 | return -ENOMEM; |
1131 | } | | 1131 | } |
1132 | | | 1132 | |
1133 | /* XXX drm_local_map abstraction violation. Pooh. */ | | 1133 | /* XXX drm_local_map abstraction violation. Pooh. */ |
1134 | bus_space_set_region_1(gfx_hws_cpu_map->lm_data.bus_space.bst, | | 1134 | bus_space_set_region_1(gfx_hws_cpu_map->lm_data.bus_space.bst, |
1135 | gfx_hws_cpu_map->lm_data.bus_space.bsh, 0, 0, PAGE_SIZE); | | 1135 | gfx_hws_cpu_map->lm_data.bus_space.bsh, 0, 0, PAGE_SIZE); |
1136 | #else | | 1136 | #else |
1137 | dev_priv->dri1.gfx_hws_cpu_addr = | | 1137 | dev_priv->dri1.gfx_hws_cpu_addr = |
1138 | ioremap_wc(dev_priv->mm.gtt_base_addr + hws->addr, 4096); | | 1138 | ioremap_wc(dev_priv->mm.gtt_base_addr + hws->addr, 4096); |
1139 | if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) { | | 1139 | if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) { |
1140 | i915_dma_cleanup(dev); | | 1140 | i915_dma_cleanup(dev); |
1141 | ring->status_page.gfx_addr = 0; | | 1141 | ring->status_page.gfx_addr = 0; |
1142 | DRM_ERROR("can not ioremap virtual address for" | | 1142 | DRM_ERROR("can not ioremap virtual address for" |
1143 | " G33 hw status page\n"); | | 1143 | " G33 hw status page\n"); |
1144 | return -ENOMEM; | | 1144 | return -ENOMEM; |
1145 | } | | 1145 | } |
1146 | | | 1146 | |
1147 | memset_io(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE); | | 1147 | memset_io(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE); |
1148 | #endif | | 1148 | #endif |
1149 | | | 1149 | |
1150 | I915_WRITE(HWS_PGA, ring->status_page.gfx_addr); | | 1150 | I915_WRITE(HWS_PGA, ring->status_page.gfx_addr); |
1151 | | | 1151 | |
1152 | DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n", | | 1152 | DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n", |
1153 | ring->status_page.gfx_addr); | | 1153 | ring->status_page.gfx_addr); |
1154 | DRM_DEBUG_DRIVER("load hws at %p\n", | | 1154 | DRM_DEBUG_DRIVER("load hws at %p\n", |
1155 | ring->status_page.page_addr); | | 1155 | ring->status_page.page_addr); |
1156 | return 0; | | 1156 | return 0; |
1157 | } | | 1157 | } |
1158 | | | 1158 | |
1159 | static int i915_get_bridge_dev(struct drm_device *dev) | | 1159 | static int i915_get_bridge_dev(struct drm_device *dev) |
1160 | { | | 1160 | { |
1161 | struct drm_i915_private *dev_priv = dev->dev_private; | | 1161 | struct drm_i915_private *dev_priv = dev->dev_private; |
1162 | | | 1162 | |
1163 | #ifdef __NetBSD__ | | | |
1164 | dev_priv->bridge_dev = pci_kludgey_find_dev(dev->pdev, 0, 0, 0); | | | |
1165 | #else | | | |
1166 | dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0)); | | 1163 | dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0)); |
1167 | #endif | | | |
1168 | if (!dev_priv->bridge_dev) { | | 1164 | if (!dev_priv->bridge_dev) { |
1169 | DRM_ERROR("bridge device not found\n"); | | 1165 | DRM_ERROR("bridge device not found\n"); |
1170 | return -1; | | 1166 | return -1; |
1171 | } | | 1167 | } |
1172 | return 0; | | 1168 | return 0; |
1173 | } | | 1169 | } |
1174 | | | 1170 | |
1175 | #define MCHBAR_I915 0x44 | | 1171 | #define MCHBAR_I915 0x44 |
1176 | #define MCHBAR_I965 0x48 | | 1172 | #define MCHBAR_I965 0x48 |
1177 | #define MCHBAR_SIZE (4*4096) | | 1173 | #define MCHBAR_SIZE (4*4096) |
1178 | | | 1174 | |
1179 | #define DEVEN_REG 0x54 | | 1175 | #define DEVEN_REG 0x54 |
1180 | #define DEVEN_MCHBAR_EN (1 << 28) | | 1176 | #define DEVEN_MCHBAR_EN (1 << 28) |
1181 | | | 1177 | |
1182 | /* Allocate space for the MCH regs if needed, return nonzero on error */ | | 1178 | /* Allocate space for the MCH regs if needed, return nonzero on error */ |
1183 | static int | | 1179 | static int |
1184 | intel_alloc_mchbar_resource(struct drm_device *dev) | | 1180 | intel_alloc_mchbar_resource(struct drm_device *dev) |
1185 | { | | 1181 | { |
1186 | drm_i915_private_t *dev_priv = dev->dev_private; | | 1182 | drm_i915_private_t *dev_priv = dev->dev_private; |
1187 | int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; | | 1183 | int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; |
1188 | u32 temp_lo, temp_hi = 0; | | 1184 | u32 temp_lo, temp_hi = 0; |
1189 | u64 mchbar_addr; | | 1185 | u64 mchbar_addr; |
1190 | int ret; | | 1186 | int ret; |
1191 | | | 1187 | |
1192 | if (INTEL_INFO(dev)->gen >= 4) | | 1188 | if (INTEL_INFO(dev)->gen >= 4) |
1193 | pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi); | | 1189 | pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi); |
1194 | pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo); | | 1190 | pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo); |
1195 | mchbar_addr = ((u64)temp_hi << 32) | temp_lo; | | 1191 | mchbar_addr = ((u64)temp_hi << 32) | temp_lo; |
1196 | | | 1192 | |
1197 | /* If ACPI doesn't have it, assume we need to allocate it ourselves */ | | 1193 | /* If ACPI doesn't have it, assume we need to allocate it ourselves */ |
1198 | #ifdef CONFIG_PNP | | 1194 | #ifdef CONFIG_PNP |
1199 | if (mchbar_addr && | | 1195 | if (mchbar_addr && |
1200 | pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) | | 1196 | pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) |
1201 | return 0; | | 1197 | return 0; |
1202 | #endif | | 1198 | #endif |
1203 | | | 1199 | |
1204 | /* Get some space for it */ | | 1200 | /* Get some space for it */ |
1205 | dev_priv->mch_res.name = "i915 MCHBAR"; | | 1201 | dev_priv->mch_res.name = "i915 MCHBAR"; |
1206 | dev_priv->mch_res.flags = IORESOURCE_MEM; | | 1202 | dev_priv->mch_res.flags = IORESOURCE_MEM; |
1207 | ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, | | 1203 | ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, |
1208 | &dev_priv->mch_res, | | 1204 | &dev_priv->mch_res, |
1209 | MCHBAR_SIZE, MCHBAR_SIZE, | | 1205 | MCHBAR_SIZE, MCHBAR_SIZE, |
1210 | PCIBIOS_MIN_MEM, | | 1206 | PCIBIOS_MIN_MEM, |
1211 | 0, pcibios_align_resource, | | 1207 | 0, pcibios_align_resource, |
1212 | dev_priv->bridge_dev); | | 1208 | dev_priv->bridge_dev); |
1213 | if (ret) { | | 1209 | if (ret) { |
1214 | DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret); | | 1210 | DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret); |
1215 | dev_priv->mch_res.start = 0; | | 1211 | dev_priv->mch_res.start = 0; |
1216 | return ret; | | 1212 | return ret; |
1217 | } | | 1213 | } |
1218 | | | 1214 | |
1219 | if (INTEL_INFO(dev)->gen >= 4) | | 1215 | if (INTEL_INFO(dev)->gen >= 4) |
1220 | pci_write_config_dword(dev_priv->bridge_dev, reg + 4, | | 1216 | pci_write_config_dword(dev_priv->bridge_dev, reg + 4, |
1221 | upper_32_bits(dev_priv->mch_res.start)); | | 1217 | upper_32_bits(dev_priv->mch_res.start)); |
1222 | | | 1218 | |
1223 | pci_write_config_dword(dev_priv->bridge_dev, reg, | | 1219 | pci_write_config_dword(dev_priv->bridge_dev, reg, |
1224 | lower_32_bits(dev_priv->mch_res.start)); | | 1220 | lower_32_bits(dev_priv->mch_res.start)); |
1225 | return 0; | | 1221 | return 0; |
1226 | } | | 1222 | } |
1227 | | | 1223 | |
1228 | /* Set up MCHBAR if possible; record in mchbar_need_disable whether it must be disabled again on teardown */ | | 1224 | /* Set up MCHBAR if possible; record in mchbar_need_disable whether it must be disabled again on teardown */ |
1229 | static void | | 1225 | static void |
1230 | intel_setup_mchbar(struct drm_device *dev) | | 1226 | intel_setup_mchbar(struct drm_device *dev) |
1231 | { | | 1227 | { |
1232 | drm_i915_private_t *dev_priv = dev->dev_private; | | 1228 | drm_i915_private_t *dev_priv = dev->dev_private; |
1233 | int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; | | 1229 | int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; |
1234 | u32 temp; | | 1230 | u32 temp; |
1235 | bool enabled; | | 1231 | bool enabled; |
1236 | | | 1232 | |
1237 | dev_priv->mchbar_need_disable = false; | | 1233 | dev_priv->mchbar_need_disable = false; |
1238 | | | 1234 | |
1239 | if (IS_I915G(dev) || IS_I915GM(dev)) { | | 1235 | if (IS_I915G(dev) || IS_I915GM(dev)) { |
1240 | pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp); | | 1236 | pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp); |
1241 | enabled = !!(temp & DEVEN_MCHBAR_EN); | | 1237 | enabled = !!(temp & DEVEN_MCHBAR_EN); |
1242 | } else { | | 1238 | } else { |
1243 | pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); | | 1239 | pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); |
1244 | enabled = temp & 1; | | 1240 | enabled = temp & 1; |
1245 | } | | 1241 | } |
1246 | | | 1242 | |
1247 | /* If it's already enabled, don't have to do anything */ | | 1243 | /* If it's already enabled, don't have to do anything */ |
1248 | if (enabled) | | 1244 | if (enabled) |
1249 | return; | | 1245 | return; |
1250 | | | 1246 | |
1251 | if (intel_alloc_mchbar_resource(dev)) | | 1247 | if (intel_alloc_mchbar_resource(dev)) |
1252 | return; | | 1248 | return; |
1253 | | | 1249 | |
1254 | dev_priv->mchbar_need_disable = true; | | 1250 | dev_priv->mchbar_need_disable = true; |
1255 | | | 1251 | |
1256 | /* Space is allocated or reserved, so enable it. */ | | 1252 | /* Space is allocated or reserved, so enable it. */ |
1257 | if (IS_I915G(dev) || IS_I915GM(dev)) { | | 1253 | if (IS_I915G(dev) || IS_I915GM(dev)) { |
1258 | pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, | | 1254 | pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, |
1259 | temp | DEVEN_MCHBAR_EN); | | 1255 | temp | DEVEN_MCHBAR_EN); |
1260 | } else { | | 1256 | } else { |
1261 | pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); | | 1257 | pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); |
1262 | pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1); | | 1258 | pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1); |
1263 | } | | 1259 | } |
1264 | } | | 1260 | } |
1265 | | | 1261 | |
1266 | static void | | 1262 | static void |
1267 | intel_teardown_mchbar(struct drm_device *dev) | | 1263 | intel_teardown_mchbar(struct drm_device *dev) |
1268 | { | | 1264 | { |
1269 | drm_i915_private_t *dev_priv = dev->dev_private; | | 1265 | drm_i915_private_t *dev_priv = dev->dev_private; |
1270 | int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; | | 1266 | int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; |
1271 | u32 temp; | | 1267 | u32 temp; |
1272 | | | 1268 | |
1273 | if (dev_priv->mchbar_need_disable) { | | 1269 | if (dev_priv->mchbar_need_disable) { |
1274 | if (IS_I915G(dev) || IS_I915GM(dev)) { | | 1270 | if (IS_I915G(dev) || IS_I915GM(dev)) { |
1275 | pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp); | | 1271 | pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp); |
1276 | temp &= ~DEVEN_MCHBAR_EN; | | 1272 | temp &= ~DEVEN_MCHBAR_EN; |
1277 | pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp); | | 1273 | pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp); |
1278 | } else { | | 1274 | } else { |
1279 | pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); | | 1275 | pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); |
1280 | temp &= ~1; | | 1276 | temp &= ~1; |
1281 | pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp); | | 1277 | pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp); |
1282 | } | | 1278 | } |
1283 | } | | 1279 | } |
1284 | | | 1280 | |
1285 | if (dev_priv->mch_res.start) | | 1281 | if (dev_priv->mch_res.start) |
1286 | release_resource(&dev_priv->mch_res); | | 1282 | release_resource(&dev_priv->mch_res); |
1287 | } | | 1283 | } |
1288 | | | 1284 | |
1289 | #ifndef __NetBSD__ /* XXX vga */ | | 1285 | #ifndef __NetBSD__ /* XXX vga */ |
1290 | /* true = enable decode, false = disable decode */ | | 1286 | /* true = enable decode, false = disable decode */ |
1291 | static unsigned int i915_vga_set_decode(void *cookie, bool state) | | 1287 | static unsigned int i915_vga_set_decode(void *cookie, bool state) |
1292 | { | | 1288 | { |
1293 | struct drm_device *dev = cookie; | | 1289 | struct drm_device *dev = cookie; |
1294 | | | 1290 | |
1295 | intel_modeset_vga_set_state(dev, state); | | 1291 | intel_modeset_vga_set_state(dev, state); |
1296 | if (state) | | 1292 | if (state) |
1297 | return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | | | 1293 | return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | |
1298 | VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; | | 1294 | VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; |
1299 | else | | 1295 | else |
1300 | return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; | | 1296 | return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; |
1301 | } | | 1297 | } |
1302 | | | 1298 | |
1303 | static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) | | 1299 | static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) |
1304 | { | | 1300 | { |
1305 | struct drm_device *dev = pci_get_drvdata(pdev); | | 1301 | struct drm_device *dev = pci_get_drvdata(pdev); |
1306 | pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; | | 1302 | pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; |
1307 | if (state == VGA_SWITCHEROO_ON) { | | 1303 | if (state == VGA_SWITCHEROO_ON) { |
1308 | pr_info("switched on\n"); | | 1304 | pr_info("switched on\n"); |
1309 | dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; | | 1305 | dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; |
1310 | /* i915 resume handler doesn't set to D0 */ | | 1306 | /* i915 resume handler doesn't set to D0 */ |
1311 | pci_set_power_state(dev->pdev, PCI_D0); | | 1307 | pci_set_power_state(dev->pdev, PCI_D0); |
1312 | i915_resume(dev); | | 1308 | i915_resume(dev); |
1313 | dev->switch_power_state = DRM_SWITCH_POWER_ON; | | 1309 | dev->switch_power_state = DRM_SWITCH_POWER_ON; |
1314 | } else { | | 1310 | } else { |
1315 | pr_err("switched off\n"); | | 1311 | pr_err("switched off\n"); |
1316 | dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; | | 1312 | dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; |
1317 | i915_suspend(dev, pmm); | | 1313 | i915_suspend(dev, pmm); |
1318 | dev->switch_power_state = DRM_SWITCH_POWER_OFF; | | 1314 | dev->switch_power_state = DRM_SWITCH_POWER_OFF; |
1319 | } | | 1315 | } |
1320 | } | | 1316 | } |
1321 | | | 1317 | |
1322 | static bool i915_switcheroo_can_switch(struct pci_dev *pdev) | | 1318 | static bool i915_switcheroo_can_switch(struct pci_dev *pdev) |
1323 | { | | 1319 | { |
1324 | struct drm_device *dev = pci_get_drvdata(pdev); | | 1320 | struct drm_device *dev = pci_get_drvdata(pdev); |
1325 | bool can_switch; | | 1321 | bool can_switch; |
1326 | | | 1322 | |
1327 | spin_lock(&dev->count_lock); | | 1323 | spin_lock(&dev->count_lock); |
1328 | can_switch = (dev->open_count == 0); | | 1324 | can_switch = (dev->open_count == 0); |
1329 | spin_unlock(&dev->count_lock); | | 1325 | spin_unlock(&dev->count_lock); |
1330 | return can_switch; | | 1326 | return can_switch; |
1331 | } | | 1327 | } |
1332 | | | 1328 | |
1333 | static const struct vga_switcheroo_client_ops i915_switcheroo_ops = { | | 1329 | static const struct vga_switcheroo_client_ops i915_switcheroo_ops = { |
1334 | .set_gpu_state = i915_switcheroo_set_state, | | 1330 | .set_gpu_state = i915_switcheroo_set_state, |
1335 | .reprobe = NULL, | | 1331 | .reprobe = NULL, |
1336 | .can_switch = i915_switcheroo_can_switch, | | 1332 | .can_switch = i915_switcheroo_can_switch, |
1337 | }; | | 1333 | }; |
1338 | #endif | | 1334 | #endif |
1339 | | | 1335 | |
1340 | static int i915_load_modeset_init(struct drm_device *dev) | | 1336 | static int i915_load_modeset_init(struct drm_device *dev) |
1341 | { | | 1337 | { |
1342 | struct drm_i915_private *dev_priv = dev->dev_private; | | 1338 | struct drm_i915_private *dev_priv = dev->dev_private; |
1343 | int ret; | | 1339 | int ret; |
1344 | | | 1340 | |
1345 | ret = intel_parse_bios(dev); | | 1341 | ret = intel_parse_bios(dev); |
1346 | if (ret) | | 1342 | if (ret) |
1347 | DRM_INFO("failed to find VBIOS tables\n"); | | 1343 | DRM_INFO("failed to find VBIOS tables\n"); |
1348 | | | 1344 | |
1349 | #ifndef __NetBSD__ /* XXX vga */ | | 1345 | #ifndef __NetBSD__ /* XXX vga */ |
1350 | 	/* If we have more than one VGA card, then we need to arbitrate access | | 1346 | 	/* If we have more than one VGA card, then we need to arbitrate access |
1351 | * to the common VGA resources. | | 1347 | * to the common VGA resources. |
1352 | * | | 1348 | * |
1353 | * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA), | | 1349 | * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA), |
1354 | * then we do not take part in VGA arbitration and the | | 1350 | * then we do not take part in VGA arbitration and the |
1355 | * vga_client_register() fails with -ENODEV. | | 1351 | * vga_client_register() fails with -ENODEV. |
1356 | */ | | 1352 | */ |
1357 | ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode); | | 1353 | ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode); |
1358 | if (ret && ret != -ENODEV) | | 1354 | if (ret && ret != -ENODEV) |
1359 | goto out; | | 1355 | goto out; |
1360 | #endif | | 1356 | #endif |
1361 | | | 1357 | |
1362 | intel_register_dsm_handler(); | | 1358 | intel_register_dsm_handler(); |
1363 | | | 1359 | |
1364 | #ifndef __NetBSD__ /* XXX vga */ | | 1360 | #ifndef __NetBSD__ /* XXX vga */ |
1365 | ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops); | | 1361 | ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops); |
1366 | if (ret) | | 1362 | if (ret) |
1367 | goto cleanup_vga_client; | | 1363 | goto cleanup_vga_client; |
1368 | #endif | | 1364 | #endif |
1369 | | | 1365 | |
1370 | /* Initialise stolen first so that we may reserve preallocated | | 1366 | /* Initialise stolen first so that we may reserve preallocated |
1371 | * objects for the BIOS to KMS transition. | | 1367 | * objects for the BIOS to KMS transition. |
1372 | */ | | 1368 | */ |
1373 | ret = i915_gem_init_stolen(dev); | | 1369 | ret = i915_gem_init_stolen(dev); |
1374 | if (ret) | | 1370 | if (ret) |
1375 | goto cleanup_vga_switcheroo; | | 1371 | goto cleanup_vga_switcheroo; |
1376 | | | 1372 | |
1377 | intel_modeset_init(dev); | | 1373 | intel_modeset_init(dev); |
1378 | | | 1374 | |
1379 | ret = i915_gem_init(dev); | | 1375 | ret = i915_gem_init(dev); |
1380 | if (ret) | | 1376 | if (ret) |
1381 | goto cleanup_gem_stolen; | | 1377 | goto cleanup_gem_stolen; |
1382 | | | 1378 | |
1383 | intel_modeset_gem_init(dev); | | 1379 | intel_modeset_gem_init(dev); |
1384 | | | 1380 | |
1385 | INIT_WORK(&dev_priv->console_resume_work, intel_console_resume); | | 1381 | INIT_WORK(&dev_priv->console_resume_work, intel_console_resume); |
1386 | | | 1382 | |
1387 | ret = drm_irq_install(dev); | | 1383 | ret = drm_irq_install(dev); |
1388 | if (ret) | | 1384 | if (ret) |
1389 | goto cleanup_gem; | | 1385 | goto cleanup_gem; |
1390 | | | 1386 | |
1391 | /* Always safe in the mode setting case. */ | | 1387 | /* Always safe in the mode setting case. */ |
1392 | /* FIXME: do pre/post-mode set stuff in core KMS code */ | | 1388 | /* FIXME: do pre/post-mode set stuff in core KMS code */ |
1393 | dev->vblank_disable_allowed = 1; | | 1389 | dev->vblank_disable_allowed = 1; |
1394 | | | 1390 | |
1395 | #ifndef __NetBSD__ /* XXX fb */ | | 1391 | #ifndef __NetBSD__ /* XXX fb */ |
1396 | ret = intel_fbdev_init(dev); | | 1392 | ret = intel_fbdev_init(dev); |
1397 | if (ret) | | 1393 | if (ret) |
1398 | goto cleanup_irq; | | 1394 | goto cleanup_irq; |
1399 | #endif | | 1395 | #endif |
1400 | | | 1396 | |
1401 | drm_kms_helper_poll_init(dev); | | 1397 | drm_kms_helper_poll_init(dev); |
1402 | | | 1398 | |
1403 | /* We're off and running w/KMS */ | | 1399 | /* We're off and running w/KMS */ |
1404 | dev_priv->mm.suspended = 0; | | 1400 | dev_priv->mm.suspended = 0; |
1405 | | | 1401 | |
1406 | return 0; | | 1402 | return 0; |
1407 | | | 1403 | |
1408 | #ifndef __NetBSD__ /* XXX fb */ | | 1404 | #ifndef __NetBSD__ /* XXX fb */ |
1409 | cleanup_irq: | | 1405 | cleanup_irq: |
1410 | drm_irq_uninstall(dev); | | 1406 | drm_irq_uninstall(dev); |
1411 | #endif | | 1407 | #endif |
1412 | cleanup_gem: | | 1408 | cleanup_gem: |
1413 | mutex_lock(&dev->struct_mutex); | | 1409 | mutex_lock(&dev->struct_mutex); |
1414 | i915_gem_cleanup_ringbuffer(dev); | | 1410 | i915_gem_cleanup_ringbuffer(dev); |
1415 | mutex_unlock(&dev->struct_mutex); | | 1411 | mutex_unlock(&dev->struct_mutex); |
1416 | i915_gem_cleanup_aliasing_ppgtt(dev); | | 1412 | i915_gem_cleanup_aliasing_ppgtt(dev); |
1417 | cleanup_gem_stolen: | | 1413 | cleanup_gem_stolen: |
1418 | i915_gem_cleanup_stolen(dev); | | 1414 | i915_gem_cleanup_stolen(dev); |
1419 | cleanup_vga_switcheroo: | | 1415 | cleanup_vga_switcheroo: |
1420 | #ifndef __NetBSD__ /* XXX vga */ | | 1416 | #ifndef __NetBSD__ /* XXX vga */ |
1421 | vga_switcheroo_unregister_client(dev->pdev); | | 1417 | vga_switcheroo_unregister_client(dev->pdev); |
1422 | cleanup_vga_client: | | 1418 | cleanup_vga_client: |
1423 | vga_client_register(dev->pdev, NULL, NULL, NULL); | | 1419 | vga_client_register(dev->pdev, NULL, NULL, NULL); |
1424 | out: | | 1420 | out: |
1425 | #endif | | 1421 | #endif |
1426 | return ret; | | 1422 | return ret; |
1427 | } | | 1423 | } |
1428 | | | 1424 | |
1429 | int i915_master_create(struct drm_device *dev, struct drm_master *master) | | 1425 | int i915_master_create(struct drm_device *dev, struct drm_master *master) |
1430 | { | | 1426 | { |
1431 | struct drm_i915_master_private *master_priv; | | 1427 | struct drm_i915_master_private *master_priv; |
1432 | | | 1428 | |
1433 | master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL); | | 1429 | master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL); |
1434 | if (!master_priv) | | 1430 | if (!master_priv) |
1435 | return -ENOMEM; | | 1431 | return -ENOMEM; |
1436 | | | 1432 | |
1437 | master->driver_priv = master_priv; | | 1433 | master->driver_priv = master_priv; |
1438 | return 0; | | 1434 | return 0; |
1439 | } | | 1435 | } |
1440 | | | 1436 | |
1441 | void i915_master_destroy(struct drm_device *dev, struct drm_master *master) | | 1437 | void i915_master_destroy(struct drm_device *dev, struct drm_master *master) |
1442 | { | | 1438 | { |
1443 | struct drm_i915_master_private *master_priv = master->driver_priv; | | 1439 | struct drm_i915_master_private *master_priv = master->driver_priv; |
1444 | | | 1440 | |
1445 | if (!master_priv) | | 1441 | if (!master_priv) |
1446 | return; | | 1442 | return; |
1447 | | | 1443 | |
1448 | kfree(master_priv); | | 1444 | kfree(master_priv); |
1449 | | | 1445 | |
1450 | master->driver_priv = NULL; | | 1446 | master->driver_priv = NULL; |
1451 | } | | 1447 | } |
1452 | | | 1448 | |
1453 | static void | | 1449 | static void |
1454 | i915_mtrr_setup(struct drm_i915_private *dev_priv, unsigned long base, | | 1450 | i915_mtrr_setup(struct drm_i915_private *dev_priv, unsigned long base, |
1455 | unsigned long size) | | 1451 | unsigned long size) |
1456 | { | | 1452 | { |
1457 | dev_priv->mm.gtt_mtrr = -1; | | 1453 | dev_priv->mm.gtt_mtrr = -1; |
1458 | | | 1454 | |
1459 | #if defined(CONFIG_X86_PAT) | | 1455 | #if defined(CONFIG_X86_PAT) |
1460 | if (cpu_has_pat) | | 1456 | if (cpu_has_pat) |
1461 | return; | | 1457 | return; |
1462 | #endif | | 1458 | #endif |
1463 | | | 1459 | |
1464 | /* Set up a WC MTRR for non-PAT systems. This is more common than | | 1460 | /* Set up a WC MTRR for non-PAT systems. This is more common than |
1465 | 	 * one would think: the kernel disables PAT on first-generation | | 1461 | 	 * one would think: the kernel disables PAT on first-generation |
1466 | 	 * Core chips because WC PAT would be overridden by a UC MTRR, and | | 1462 | 	 * Core chips because WC PAT would be overridden by a UC MTRR, and |
1467 | 	 * it does so even when no UC MTRR is actually present. | | 1463 | 	 * it does so even when no UC MTRR is actually present. |
1468 | */ | | 1464 | */ |
1469 | dev_priv->mm.gtt_mtrr = mtrr_add(base, size, MTRR_TYPE_WRCOMB, 1); | | 1465 | dev_priv->mm.gtt_mtrr = mtrr_add(base, size, MTRR_TYPE_WRCOMB, 1); |
1470 | if (dev_priv->mm.gtt_mtrr < 0) { | | 1466 | if (dev_priv->mm.gtt_mtrr < 0) { |
1471 | DRM_INFO("MTRR allocation failed. Graphics " | | 1467 | DRM_INFO("MTRR allocation failed. Graphics " |
1472 | "performance may suffer.\n"); | | 1468 | "performance may suffer.\n"); |
1473 | } | | 1469 | } |
1474 | } | | 1470 | } |
1475 | | | 1471 | |
1476 | #ifndef __NetBSD__ /* XXX fb */ | | 1472 | #ifndef __NetBSD__ /* XXX fb */ |
1477 | static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) | | 1473 | static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) |
1478 | { | | 1474 | { |
1479 | struct apertures_struct *ap; | | 1475 | struct apertures_struct *ap; |
1480 | struct pci_dev *pdev = dev_priv->dev->pdev; | | 1476 | struct pci_dev *pdev = dev_priv->dev->pdev; |
1481 | bool primary; | | 1477 | bool primary; |
1482 | | | 1478 | |
1483 | ap = alloc_apertures(1); | | 1479 | ap = alloc_apertures(1); |
1484 | if (!ap) | | 1480 | if (!ap) |
1485 | return; | | 1481 | return; |
1486 | | | 1482 | |
1487 | ap->ranges[0].base = dev_priv->mm.gtt->gma_bus_addr; | | 1483 | ap->ranges[0].base = dev_priv->mm.gtt->gma_bus_addr; |
1488 | ap->ranges[0].size = | | 1484 | ap->ranges[0].size = |
1489 | dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; | | 1485 | dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; |
1490 | primary = | | 1486 | primary = |
1491 | pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; | | 1487 | pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; |
1492 | | | 1488 | |
1493 | remove_conflicting_framebuffers(ap, "inteldrmfb", primary); | | 1489 | remove_conflicting_framebuffers(ap, "inteldrmfb", primary); |
1494 | | | 1490 | |
1495 | kfree(ap); | | 1491 | kfree(ap); |
1496 | } | | 1492 | } |
1497 | #endif | | 1493 | #endif |
1498 | | | 1494 | |
1499 | static void i915_dump_device_info(struct drm_i915_private *dev_priv) | | 1495 | static void i915_dump_device_info(struct drm_i915_private *dev_priv) |
1500 | { | | 1496 | { |
1501 | const struct intel_device_info *info = dev_priv->info; | | 1497 | const struct intel_device_info *info = dev_priv->info; |
1502 | | | 1498 | |
1503 | #define DEV_INFO_FLAG(name) info->name ? #name "," : "" | | 1499 | #define DEV_INFO_FLAG(name) info->name ? #name "," : "" |
1504 | #define DEV_INFO_SEP , | | 1500 | #define DEV_INFO_SEP , |
1505 | DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags=" | | 1501 | DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags=" |
1506 | "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s", | | 1502 | "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s", |
1507 | info->gen, | | 1503 | info->gen, |
1508 | dev_priv->dev->pdev->device, | | 1504 | dev_priv->dev->pdev->device, |
1509 | DEV_INFO_FLAGS); | | 1505 | DEV_INFO_FLAGS); |
1510 | #undef DEV_INFO_FLAG | | 1506 | #undef DEV_INFO_FLAG |
1511 | #undef DEV_INFO_SEP | | 1507 | #undef DEV_INFO_SEP |
1512 | } | | 1508 | } |
1513 | | | 1509 | |
1514 | /** | | 1510 | /** |
1515 | * i915_driver_load - setup chip and create an initial config | | 1511 | * i915_driver_load - setup chip and create an initial config |
1516 | * @dev: DRM device | | 1512 | * @dev: DRM device |
1517 | * @flags: startup flags | | 1513 | * @flags: startup flags |
1518 | * | | 1514 | * |
1519 | * The driver load routine has to do several things: | | 1515 | * The driver load routine has to do several things: |
1520 | * - drive output discovery via intel_modeset_init() | | 1516 | * - drive output discovery via intel_modeset_init() |
1521 | * - initialize the memory manager | | 1517 | * - initialize the memory manager |
1522 | * - allocate initial config memory | | 1518 | * - allocate initial config memory |
1523 | * - setup the DRM framebuffer with the allocated memory | | 1519 | * - setup the DRM framebuffer with the allocated memory |
1524 | */ | | 1520 | */ |
1525 | int i915_driver_load(struct drm_device *dev, unsigned long flags) | | 1521 | int i915_driver_load(struct drm_device *dev, unsigned long flags) |
1526 | { | | 1522 | { |
1527 | struct drm_i915_private *dev_priv; | | 1523 | struct drm_i915_private *dev_priv; |
1528 | struct intel_device_info *info; | | 1524 | struct intel_device_info *info; |
1529 | int ret = 0, mmio_bar, mmio_size; | | 1525 | int ret = 0, mmio_bar, mmio_size; |
1530 | uint32_t aperture_size; | | 1526 | uint32_t aperture_size; |
1531 | | | 1527 | |
1532 | info = (struct intel_device_info *) flags; | | 1528 | info = (struct intel_device_info *) flags; |
1533 | | | 1529 | |
1534 | /* Refuse to load on gen6+ without kms enabled. */ | | 1530 | /* Refuse to load on gen6+ without kms enabled. */ |
1535 | if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) | | 1531 | if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) |
1536 | return -ENODEV; | | 1532 | return -ENODEV; |
1537 | | | 1533 | |
1538 | /* i915 has 4 more counters */ | | 1534 | /* i915 has 4 more counters */ |
1539 | dev->counters += 4; | | 1535 | dev->counters += 4; |
1540 | dev->types[6] = _DRM_STAT_IRQ; | | 1536 | dev->types[6] = _DRM_STAT_IRQ; |
1541 | dev->types[7] = _DRM_STAT_PRIMARY; | | 1537 | dev->types[7] = _DRM_STAT_PRIMARY; |
1542 | dev->types[8] = _DRM_STAT_SECONDARY; | | 1538 | dev->types[8] = _DRM_STAT_SECONDARY; |
1543 | dev->types[9] = _DRM_STAT_DMA; | | 1539 | dev->types[9] = _DRM_STAT_DMA; |
1544 | | | 1540 | |
1545 | dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL); | | 1541 | dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL); |
1546 | if (dev_priv == NULL) | | 1542 | if (dev_priv == NULL) |
1547 | return -ENOMEM; | | 1543 | return -ENOMEM; |
1548 | | | 1544 | |
1549 | dev->dev_private = (void *)dev_priv; | | 1545 | dev->dev_private = (void *)dev_priv; |
1550 | dev_priv->dev = dev; | | 1546 | dev_priv->dev = dev; |
1551 | dev_priv->info = info; | | 1547 | dev_priv->info = info; |
1552 | | | 1548 | |
1553 | i915_dump_device_info(dev_priv); | | 1549 | i915_dump_device_info(dev_priv); |
1554 | | | 1550 | |
1555 | if (i915_get_bridge_dev(dev)) { | | 1551 | if (i915_get_bridge_dev(dev)) { |
1556 | ret = -EIO; | | 1552 | ret = -EIO; |
1557 | goto free_priv; | | 1553 | goto free_priv; |
1558 | } | | 1554 | } |
1559 | | | 1555 | |
1560 | ret = i915_gem_gtt_init(dev); | | 1556 | ret = i915_gem_gtt_init(dev); |
1561 | if (ret) | | 1557 | if (ret) |
1562 | goto put_bridge; | | 1558 | goto put_bridge; |
1563 | | | 1559 | |
1564 | #ifndef __NetBSD__ /* XXX fb */ | | 1560 | #ifndef __NetBSD__ /* XXX fb */ |
1565 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | | 1561 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
1566 | i915_kick_out_firmware_fb(dev_priv); | | 1562 | i915_kick_out_firmware_fb(dev_priv); |
1567 | #endif | | 1563 | #endif |
1568 | | | 1564 | |
1569 | pci_set_master(dev->pdev); | | 1565 | pci_set_master(dev->pdev); |
1570 | | | 1566 | |
1571 | /* overlay on gen2 is broken and can't address above 1G */ | | 1567 | /* overlay on gen2 is broken and can't address above 1G */ |
1572 | if (IS_GEN2(dev)) | | 1568 | if (IS_GEN2(dev)) |
1573 | #ifdef __NetBSD__ | | 1569 | #ifdef __NetBSD__ |
1574 | { | | 1570 | { |
1575 | ret = drm_limit_dma_space(dev, 0, 0x3fffffffUL); | | 1571 | ret = drm_limit_dma_space(dev, 0, 0x3fffffffUL); |
1576 | if (ret) | | 1572 | if (ret) |
1577 | goto put_gmch; | | 1573 | goto put_gmch; |
1578 | } | | 1574 | } |
1579 | #else | | 1575 | #else |
1580 | dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); | | 1576 | dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); |
1581 | #endif | | 1577 | #endif |
1582 | | | 1578 | |
1583 | /* 965GM sometimes incorrectly writes to hardware status page (HWS) | | 1579 | /* 965GM sometimes incorrectly writes to hardware status page (HWS) |
1584 | * using 32bit addressing, overwriting memory if HWS is located | | 1580 | * using 32bit addressing, overwriting memory if HWS is located |
1585 | * above 4GB. | | 1581 | * above 4GB. |
1586 | * | | 1582 | * |
1587 | * The documentation also mentions an issue with undefined | | 1583 | * The documentation also mentions an issue with undefined |
1588 | * behaviour if any general state is accessed within a page above 4GB, | | 1584 | * behaviour if any general state is accessed within a page above 4GB, |
1589 | * which also needs to be handled carefully. | | 1585 | * which also needs to be handled carefully. |
1590 | */ | | 1586 | */ |
1591 | if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) | | 1587 | if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) |
1592 | #ifdef __NetBSD__ | | 1588 | #ifdef __NetBSD__ |
1593 | { | | 1589 | { |
1594 | ret = drm_limit_dma_space(dev, 0, 0xffffffffUL); | | 1590 | ret = drm_limit_dma_space(dev, 0, 0xffffffffUL); |
1595 | if (ret) | | 1591 | if (ret) |
1596 | goto put_gmch; | | 1592 | goto put_gmch; |
1597 | } | | 1593 | } |
1598 | #else | | 1594 | #else |
1599 | dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32)); | | 1595 | dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32)); |
1600 | #endif | | 1596 | #endif |
1601 | | | 1597 | |
1602 | mmio_bar = IS_GEN2(dev) ? 1 : 0; | | 1598 | mmio_bar = IS_GEN2(dev) ? 1 : 0; |
1603 | /* Before gen4, the registers and the GTT are behind different BARs. | | 1599 | /* Before gen4, the registers and the GTT are behind different BARs. |
1604 | * However, from gen4 onwards, the registers and the GTT are shared | | 1600 | * However, from gen4 onwards, the registers and the GTT are shared |
1605 | 	 * in the same BAR, so we want to keep this ioremap from | | 1601 | 	 * in the same BAR, so we want to keep this ioremap from |
1606 | 	 * clobbering the GTT, which we map with ioremap_wc instead. Fortunately, | | 1602 | 	 * clobbering the GTT, which we map with ioremap_wc instead. Fortunately, |
1607 | * the register BAR remains the same size for all the earlier | | 1603 | * the register BAR remains the same size for all the earlier |
1608 | * generations up to Ironlake. | | 1604 | * generations up to Ironlake. |
1609 | */ | | 1605 | */ |
1610 | if (info->gen < 5) | | 1606 | if (info->gen < 5) |
1611 | mmio_size = 512*1024; | | 1607 | mmio_size = 512*1024; |
1612 | else | | 1608 | else |
1613 | mmio_size = 2*1024*1024; | | 1609 | mmio_size = 2*1024*1024; |
1614 | | | 1610 | |
1615 | #ifdef __NetBSD__ | | 1611 | #ifdef __NetBSD__ |
1616 | /* XXX Maybe it would be better to just use pci_mapreg_map... */ | | 1612 | /* XXX Maybe it would be better to just use pci_mapreg_map... */ |
1617 | { | | 1613 | { |
1618 | bus_addr_t addr; | | 1614 | bus_addr_t addr; |
1619 | bus_size_t size; | | 1615 | bus_size_t size; |
1620 | | | 1616 | |
1621 | if (pci_mapreg_info(dev->pdev->pd_pa.pa_pc, | | 1617 | if (pci_mapreg_info(dev->pdev->pd_pa.pa_pc, |
1622 | dev->pdev->pd_pa.pa_tag, mmio_bar, PCI_MAPREG_TYPE_MEM, | | 1618 | dev->pdev->pd_pa.pa_tag, mmio_bar, PCI_MAPREG_TYPE_MEM, |
1623 | &addr, &size, NULL /* XXX flags? */)) { | | 1619 | &addr, &size, NULL /* XXX flags? */)) { |
1624 | ret = -EIO; /* XXX */ | | 1620 | ret = -EIO; /* XXX */ |
1625 | goto put_gmch; | | 1621 | goto put_gmch; |
1626 | } | | 1622 | } |
1627 | | | 1623 | |
1628 | ret = drm_addmap(dev, addr, size, _DRM_REGISTERS, | | 1624 | ret = drm_addmap(dev, addr, size, _DRM_REGISTERS, |
1629 | (_DRM_KERNEL | _DRM_DRIVER), &dev_priv->regs_map); | | 1625 | (_DRM_KERNEL | _DRM_DRIVER), &dev_priv->regs_map); |
1630 | if (ret) | | 1626 | if (ret) |
1631 | goto put_gmch; | | 1627 | goto put_gmch; |
1632 | } | | 1628 | } |
1633 | #else | | 1629 | #else |
1634 | dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size); | | 1630 | dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size); |
1635 | if (!dev_priv->regs) { | | 1631 | if (!dev_priv->regs) { |
1636 | DRM_ERROR("failed to map registers\n"); | | 1632 | DRM_ERROR("failed to map registers\n"); |
1637 | ret = -EIO; | | 1633 | ret = -EIO; |
1638 | goto put_gmch; | | 1634 | goto put_gmch; |
1639 | } | | 1635 | } |
1640 | #endif | | 1636 | #endif |
1641 | | | 1637 | |
1642 | aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; | | 1638 | aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; |
1643 | dev_priv->mm.gtt_base_addr = dev_priv->mm.gtt->gma_bus_addr; | | 1639 | dev_priv->mm.gtt_base_addr = dev_priv->mm.gtt->gma_bus_addr; |
1644 | | | 1640 | |
1645 | #ifdef __NetBSD__ | | 1641 | #ifdef __NetBSD__ |
1646 | dev_priv->mm.gtt_mapping = | | 1642 | dev_priv->mm.gtt_mapping = |
1647 | drm_io_mapping_create_wc(dev, dev_priv->mm.gtt_base_addr, | | 1643 | drm_io_mapping_create_wc(dev, dev_priv->mm.gtt_base_addr, |
1648 | aperture_size); | | 1644 | aperture_size); |
1649 | #else | | 1645 | #else |
1650 | dev_priv->mm.gtt_mapping = | | 1646 | dev_priv->mm.gtt_mapping = |
1651 | io_mapping_create_wc(dev_priv->mm.gtt_base_addr, | | 1647 | io_mapping_create_wc(dev_priv->mm.gtt_base_addr, |
1652 | aperture_size); | | 1648 | aperture_size); |
1653 | #endif | | 1649 | #endif |
1654 | if (dev_priv->mm.gtt_mapping == NULL) { | | 1650 | if (dev_priv->mm.gtt_mapping == NULL) { |
1655 | ret = -EIO; | | 1651 | ret = -EIO; |
1656 | goto out_rmmap; | | 1652 | goto out_rmmap; |
1657 | } | | 1653 | } |
1658 | | | 1654 | |
1659 | i915_mtrr_setup(dev_priv, dev_priv->mm.gtt_base_addr, | | 1655 | i915_mtrr_setup(dev_priv, dev_priv->mm.gtt_base_addr, |
1660 | aperture_size); | | 1656 | aperture_size); |
1661 | | | 1657 | |
1662 | /* The i915 workqueue is primarily used for batched retirement of | | 1658 | /* The i915 workqueue is primarily used for batched retirement of |
1663 | * requests (and thus managing bo) once the task has been completed | | 1659 | * requests (and thus managing bo) once the task has been completed |
1664 | * by the GPU. i915_gem_retire_requests() is called directly when we | | 1660 | * by the GPU. i915_gem_retire_requests() is called directly when we |
1665 | * need high-priority retirement, such as waiting for an explicit | | 1661 | * need high-priority retirement, such as waiting for an explicit |
1666 | * bo. | | 1662 | * bo. |
1667 | * | | 1663 | * |
1668 | * It is also used for periodic low-priority events, such as | | 1664 | * It is also used for periodic low-priority events, such as |
1669 | * idle-timers and recording error state. | | 1665 | * idle-timers and recording error state. |
1670 | * | | 1666 | * |
1671 | * All tasks on the workqueue are expected to acquire the dev mutex | | 1667 | * All tasks on the workqueue are expected to acquire the dev mutex |
1672 | * so there is no point in running more than one instance of the | | 1668 | * so there is no point in running more than one instance of the |
1673 | * workqueue at any time. Use an ordered one. | | 1669 | * workqueue at any time. Use an ordered one. |
1674 | */ | | 1670 | */ |
1675 | dev_priv->wq = alloc_ordered_workqueue("i915", 0); | | 1671 | dev_priv->wq = alloc_ordered_workqueue("i915", 0); |
1676 | if (dev_priv->wq == NULL) { | | 1672 | if (dev_priv->wq == NULL) { |
1677 | DRM_ERROR("Failed to create our workqueue.\n"); | | 1673 | DRM_ERROR("Failed to create our workqueue.\n"); |
1678 | ret = -ENOMEM; | | 1674 | ret = -ENOMEM; |
1679 | goto out_mtrrfree; | | 1675 | goto out_mtrrfree; |
1680 | } | | 1676 | } |
1681 | | | 1677 | |
1682 | /* This must be called before any calls to HAS_PCH_* */ | | 1678 | /* This must be called before any calls to HAS_PCH_* */ |
1683 | intel_detect_pch(dev); | | 1679 | intel_detect_pch(dev); |
1684 | | | 1680 | |
1685 | intel_irq_init(dev); | | 1681 | intel_irq_init(dev); |
1686 | intel_gt_init(dev); | | 1682 | intel_gt_init(dev); |
1687 | | | 1683 | |
1688 | /* Try to make sure MCHBAR is enabled before poking at it */ | | 1684 | /* Try to make sure MCHBAR is enabled before poking at it */ |
1689 | intel_setup_mchbar(dev); | | 1685 | intel_setup_mchbar(dev); |
1690 | intel_setup_gmbus(dev); | | 1686 | intel_setup_gmbus(dev); |
1691 | intel_opregion_setup(dev); | | 1687 | intel_opregion_setup(dev); |
1692 | | | 1688 | |
1693 | intel_setup_bios(dev); | | 1689 | intel_setup_bios(dev); |
1694 | | | 1690 | |
1695 | i915_gem_load(dev); | | 1691 | i915_gem_load(dev); |
1696 | | | 1692 | |
1697 | /* On the 945G/GM, the chipset reports the MSI capability on the | | 1693 | /* On the 945G/GM, the chipset reports the MSI capability on the |
1698 | * integrated graphics even though the support isn't actually there | | 1694 | * integrated graphics even though the support isn't actually there |
1699 | * according to the published specs. It doesn't appear to function | | 1695 | * according to the published specs. It doesn't appear to function |
1700 | * correctly in testing on 945G. | | 1696 | * correctly in testing on 945G. |
1701 | * This may be a side effect of MSI having been made available for PEG | | 1697 | * This may be a side effect of MSI having been made available for PEG |
1702 | * and the registers being closely associated. | | 1698 | * and the registers being closely associated. |
1703 | * | | 1699 | * |
1704 | * According to chipset errata, on the 965GM, MSI interrupts may | | 1700 | * According to chipset errata, on the 965GM, MSI interrupts may |
1705 | 	 * be lost or delayed, but we use them anyway to avoid | | 1701 | 	 * be lost or delayed, but we use them anyway to avoid |
1706 | * stuck interrupts on some machines. | | 1702 | * stuck interrupts on some machines. |
1707 | */ | | 1703 | */ |
1708 | if (!IS_I945G(dev) && !IS_I945GM(dev)) | | 1704 | if (!IS_I945G(dev) && !IS_I945GM(dev)) |
1709 | pci_enable_msi(dev->pdev); | | 1705 | pci_enable_msi(dev->pdev); |
1710 | | | 1706 | |
1711 | spin_lock_init(&dev_priv->irq_lock); | | 1707 | spin_lock_init(&dev_priv->irq_lock); |
1712 | spin_lock_init(&dev_priv->error_lock); | | 1708 | spin_lock_init(&dev_priv->error_lock); |
1713 | spin_lock_init(&dev_priv->rps.lock); | | 1709 | spin_lock_init(&dev_priv->rps.lock); |
1714 | spin_lock_init(&dev_priv->dpio_lock); | | 1710 | spin_lock_init(&dev_priv->dpio_lock); |
1715 | | | 1711 | |
1716 | #ifdef __NetBSD__ | | 1712 | #ifdef __NetBSD__ |
1717 | linux_mutex_init(&dev_priv->rps.hw_lock); | | 1713 | linux_mutex_init(&dev_priv->rps.hw_lock); |
1718 | #else | | 1714 | #else |
1719 | mutex_init(&dev_priv->rps.hw_lock); | | 1715 | mutex_init(&dev_priv->rps.hw_lock); |
1720 | #endif | | 1716 | #endif |
1721 | | | 1717 | |
1722 | if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) | | 1718 | if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) |
1723 | dev_priv->num_pipe = 3; | | 1719 | dev_priv->num_pipe = 3; |
1724 | else if (IS_MOBILE(dev) || !IS_GEN2(dev)) | | 1720 | else if (IS_MOBILE(dev) || !IS_GEN2(dev)) |
1725 | dev_priv->num_pipe = 2; | | 1721 | dev_priv->num_pipe = 2; |
1726 | else | | 1722 | else |
1727 | dev_priv->num_pipe = 1; | | 1723 | dev_priv->num_pipe = 1; |
1728 | | | 1724 | |
1729 | ret = drm_vblank_init(dev, dev_priv->num_pipe); | | 1725 | ret = drm_vblank_init(dev, dev_priv->num_pipe); |
1730 | if (ret) | | 1726 | if (ret) |
1731 | goto out_gem_unload; | | 1727 | goto out_gem_unload; |
1732 | | | 1728 | |
1733 | /* Start out suspended */ | | 1729 | /* Start out suspended */ |
1734 | dev_priv->mm.suspended = 1; | | 1730 | dev_priv->mm.suspended = 1; |
1735 | | | 1731 | |
1736 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | | 1732 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
1737 | ret = i915_load_modeset_init(dev); | | 1733 | ret = i915_load_modeset_init(dev); |
1738 | if (ret < 0) { | | 1734 | if (ret < 0) { |
1739 | DRM_ERROR("failed to init modeset\n"); | | 1735 | DRM_ERROR("failed to init modeset\n"); |
1740 | goto out_gem_unload; | | 1736 | goto out_gem_unload; |
1741 | } | | 1737 | } |
1742 | } | | 1738 | } |
1743 | | | 1739 | |
1744 | i915_setup_sysfs(dev); | | 1740 | i915_setup_sysfs(dev); |
1745 | | | 1741 | |
1746 | /* Must be done after probing outputs */ | | 1742 | /* Must be done after probing outputs */ |
1747 | intel_opregion_init(dev); | | 1743 | intel_opregion_init(dev); |
1748 | acpi_video_register(); | | 1744 | acpi_video_register(); |
1749 | | | 1745 | |
1750 | setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed, | | 1746 | setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed, |
1751 | (unsigned long) dev); | | 1747 | (unsigned long) dev); |
1752 | | | 1748 | |
1753 | if (IS_GEN5(dev)) | | 1749 | if (IS_GEN5(dev)) |
1754 | intel_gpu_ips_init(dev_priv); | | 1750 | intel_gpu_ips_init(dev_priv); |
1755 | | | 1751 | |
1756 | return 0; | | 1752 | return 0; |
1757 | | | 1753 | |
1758 | out_gem_unload: | | 1754 | out_gem_unload: |
1759 | if (dev_priv->mm.inactive_shrinker.shrink) | | 1755 | if (dev_priv->mm.inactive_shrinker.shrink) |
1760 | unregister_shrinker(&dev_priv->mm.inactive_shrinker); | | 1756 | unregister_shrinker(&dev_priv->mm.inactive_shrinker); |
1761 | | | 1757 | |
1762 | #ifdef __NetBSD__ | | 1758 | #ifdef __NetBSD__ |
1763 | spin_lock_destroy(&dev_priv->irq_lock); | | 1759 | spin_lock_destroy(&dev_priv->irq_lock); |
1764 | spin_lock_destroy(&dev_priv->error_lock); | | 1760 | spin_lock_destroy(&dev_priv->error_lock); |
1765 | spin_lock_destroy(&dev_priv->rps.lock); | | 1761 | spin_lock_destroy(&dev_priv->rps.lock); |
1766 | spin_lock_destroy(&dev_priv->dpio_lock); | | 1762 | spin_lock_destroy(&dev_priv->dpio_lock); |
1767 | linux_mutex_destroy(&dev_priv->rps.hw_lock); | | 1763 | linux_mutex_destroy(&dev_priv->rps.hw_lock); |
1768 | #endif | | 1764 | #endif |
1769 | | | 1765 | |
1770 | if (dev->pdev->msi_enabled) | | 1766 | if (dev->pdev->msi_enabled) |
1771 | pci_disable_msi(dev->pdev); | | 1767 | pci_disable_msi(dev->pdev); |
1772 | | | 1768 | |
1773 | intel_teardown_gmbus(dev); | | 1769 | intel_teardown_gmbus(dev); |
1774 | intel_teardown_mchbar(dev); | | 1770 | intel_teardown_mchbar(dev); |
1775 | destroy_workqueue(dev_priv->wq); | | 1771 | destroy_workqueue(dev_priv->wq); |
1776 | out_mtrrfree: | | 1772 | out_mtrrfree: |
1777 | if (dev_priv->mm.gtt_mtrr >= 0) { | | 1773 | if (dev_priv->mm.gtt_mtrr >= 0) { |
1778 | mtrr_del(dev_priv->mm.gtt_mtrr, | | 1774 | mtrr_del(dev_priv->mm.gtt_mtrr, |
1779 | dev_priv->mm.gtt_base_addr, | | 1775 | dev_priv->mm.gtt_base_addr, |
1780 | aperture_size); | | 1776 | aperture_size); |
1781 | dev_priv->mm.gtt_mtrr = -1; | | 1777 | dev_priv->mm.gtt_mtrr = -1; |
1782 | } | | 1778 | } |
1783 | io_mapping_free(dev_priv->mm.gtt_mapping); | | 1779 | io_mapping_free(dev_priv->mm.gtt_mapping); |
1784 | out_rmmap: | | 1780 | out_rmmap: |
1785 | #ifdef __NetBSD__ | | 1781 | #ifdef __NetBSD__ |
1786 | (void)drm_rmmap(dev, dev_priv->regs_map); | | 1782 | (void)drm_rmmap(dev, dev_priv->regs_map); |
1787 | #else | | 1783 | #else |
1788 | pci_iounmap(dev->pdev, dev_priv->regs); | | 1784 | pci_iounmap(dev->pdev, dev_priv->regs); |
1789 | #endif | | 1785 | #endif |
1790 | put_gmch: | | 1786 | put_gmch: |
1791 | i915_gem_gtt_fini(dev); | | 1787 | i915_gem_gtt_fini(dev); |
1792 | put_bridge: | | 1788 | put_bridge: |
1793 | pci_dev_put(dev_priv->bridge_dev); | | 1789 | pci_dev_put(dev_priv->bridge_dev); |
1794 | free_priv: | | 1790 | free_priv: |
1795 | kfree(dev_priv); | | 1791 | kfree(dev_priv); |
1796 | return ret; | | 1792 | return ret; |
1797 | } | | 1793 | } |
1798 | | | 1794 | |
1799 | int i915_driver_unload(struct drm_device *dev) | | 1795 | int i915_driver_unload(struct drm_device *dev) |
1800 | { | | 1796 | { |
1801 | struct drm_i915_private *dev_priv = dev->dev_private; | | 1797 | struct drm_i915_private *dev_priv = dev->dev_private; |
1802 | int ret; | | 1798 | int ret; |
1803 | | | 1799 | |
1804 | intel_gpu_ips_teardown(); | | 1800 | intel_gpu_ips_teardown(); |
1805 | | | 1801 | |
1806 | i915_teardown_sysfs(dev); | | 1802 | i915_teardown_sysfs(dev); |
1807 | | | 1803 | |
1808 | if (dev_priv->mm.inactive_shrinker.shrink) | | 1804 | if (dev_priv->mm.inactive_shrinker.shrink) |
1809 | unregister_shrinker(&dev_priv->mm.inactive_shrinker); | | 1805 | unregister_shrinker(&dev_priv->mm.inactive_shrinker); |
1810 | | | 1806 | |
1811 | mutex_lock(&dev->struct_mutex); | | 1807 | mutex_lock(&dev->struct_mutex); |
1812 | ret = i915_gpu_idle(dev); | | 1808 | ret = i915_gpu_idle(dev); |
1813 | if (ret) | | 1809 | if (ret) |
1814 | DRM_ERROR("failed to idle hardware: %d\n", ret); | | 1810 | DRM_ERROR("failed to idle hardware: %d\n", ret); |
1815 | i915_gem_retire_requests(dev); | | 1811 | i915_gem_retire_requests(dev); |
1816 | mutex_unlock(&dev->struct_mutex); | | 1812 | mutex_unlock(&dev->struct_mutex); |
1817 | | | 1813 | |
1818 | /* Cancel the retire work handler, which should be idle now. */ | | 1814 | /* Cancel the retire work handler, which should be idle now. */ |
1819 | cancel_delayed_work_sync(&dev_priv->mm.retire_work); | | 1815 | cancel_delayed_work_sync(&dev_priv->mm.retire_work); |
1820 | | | 1816 | |
1821 | io_mapping_free(dev_priv->mm.gtt_mapping); | | 1817 | io_mapping_free(dev_priv->mm.gtt_mapping); |
1822 | if (dev_priv->mm.gtt_mtrr >= 0) { | | 1818 | if (dev_priv->mm.gtt_mtrr >= 0) { |
1823 | mtrr_del(dev_priv->mm.gtt_mtrr, | | 1819 | mtrr_del(dev_priv->mm.gtt_mtrr, |
1824 | dev_priv->mm.gtt_base_addr, | | 1820 | dev_priv->mm.gtt_base_addr, |
1825 | dev_priv->mm.gtt->gtt_mappable_entries * PAGE_SIZE); | | 1821 | dev_priv->mm.gtt->gtt_mappable_entries * PAGE_SIZE); |
1826 | dev_priv->mm.gtt_mtrr = -1; | | 1822 | dev_priv->mm.gtt_mtrr = -1; |
1827 | } | | 1823 | } |
1828 | | | 1824 | |
1829 | acpi_video_unregister(); | | 1825 | acpi_video_unregister(); |
1830 | | | 1826 | |
1831 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | | 1827 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
1832 | #ifndef __NetBSD__ /* XXX fb */ | | 1828 | #ifndef __NetBSD__ /* XXX fb */ |
1833 | intel_fbdev_fini(dev); | | 1829 | intel_fbdev_fini(dev); |
1834 | #endif | | 1830 | #endif |
1835 | intel_modeset_cleanup(dev); | | 1831 | intel_modeset_cleanup(dev); |
1836 | cancel_work_sync(&dev_priv->console_resume_work); | | 1832 | cancel_work_sync(&dev_priv->console_resume_work); |
1837 | | | 1833 | |
1838 | /* | | 1834 | /* |
1839 | * free the memory space allocated for the child device | | 1835 | * free the memory space allocated for the child device |
1840 | * config parsed from VBT | | 1836 | * config parsed from VBT |
1841 | */ | | 1837 | */ |
1842 | if (dev_priv->child_dev && dev_priv->child_dev_num) { | | 1838 | if (dev_priv->child_dev && dev_priv->child_dev_num) { |
1843 | kfree(dev_priv->child_dev); | | 1839 | kfree(dev_priv->child_dev); |
1844 | dev_priv->child_dev = NULL; | | 1840 | dev_priv->child_dev = NULL; |
1845 | dev_priv->child_dev_num = 0; | | 1841 | dev_priv->child_dev_num = 0; |
1846 | } | | 1842 | } |
1847 | | | 1843 | |
1848 | #ifndef __NetBSD__ /* XXX vga */ | | 1844 | #ifndef __NetBSD__ /* XXX vga */ |
1849 | vga_switcheroo_unregister_client(dev->pdev); | | 1845 | vga_switcheroo_unregister_client(dev->pdev); |
1850 | vga_client_register(dev->pdev, NULL, NULL, NULL); | | 1846 | vga_client_register(dev->pdev, NULL, NULL, NULL); |
1851 | #endif | | 1847 | #endif |
1852 | } | | 1848 | } |
1853 | | | 1849 | |
1854 | /* Free error state after interrupts are fully disabled. */ | | 1850 | /* Free error state after interrupts are fully disabled. */ |
1855 | del_timer_sync(&dev_priv->hangcheck_timer); | | 1851 | del_timer_sync(&dev_priv->hangcheck_timer); |
1856 | cancel_work_sync(&dev_priv->error_work); | | 1852 | cancel_work_sync(&dev_priv->error_work); |
1857 | i915_destroy_error_state(dev); | | 1853 | i915_destroy_error_state(dev); |
1858 | | | 1854 | |
1859 | if (dev->pdev->msi_enabled) | | 1855 | if (dev->pdev->msi_enabled) |
1860 | pci_disable_msi(dev->pdev); | | 1856 | pci_disable_msi(dev->pdev); |
1861 | | | 1857 | |
1862 | intel_opregion_fini(dev); | | 1858 | intel_opregion_fini(dev); |
1863 | | | 1859 | |
1864 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | | 1860 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
1865 | /* Flush any outstanding unpin_work. */ | | 1861 | /* Flush any outstanding unpin_work. */ |
1866 | #ifdef __NetBSD__ | | 1862 | #ifdef __NetBSD__ |
1867 | /* | | 1863 | /* |
1868 | * XXX Keep this updated! (XXX How?) | | 1864 | * XXX Keep this updated! (XXX How?) |
1869 | * (XXX Well, do work queues for real...) | | 1865 | * (XXX Well, do work queues for real...) |
1870 | */ | | 1866 | */ |
1871 | cancel_delayed_work_sync(&dev_priv->mm.retire_work); | | 1867 | cancel_delayed_work_sync(&dev_priv->mm.retire_work); |
1872 | cancel_work_sync(&dev_priv->l3_parity.error_work); | | 1868 | cancel_work_sync(&dev_priv->l3_parity.error_work); |
1873 | cancel_work_sync(&dev_priv->rps.work); | | 1869 | cancel_work_sync(&dev_priv->rps.work); |
1874 | cancel_work_sync(&dev_priv->hotplug_work); | | 1870 | cancel_work_sync(&dev_priv->hotplug_work); |
1875 | cancel_work_sync(&dev_priv->error_work); | | 1871 | cancel_work_sync(&dev_priv->error_work); |
1876 | # if 0 /* XXX How do we get the CRTCs? */ | | 1872 | # if 0 /* XXX How do we get the CRTCs? */ |
1877 | cancel_work_sync(&...intel_crtc->unpin_work); | | 1873 | cancel_work_sync(&...intel_crtc->unpin_work); |
1878 | # endif | | 1874 | # endif |
1879 | #else | | 1875 | #else |
1880 | flush_workqueue(dev_priv->wq); | | 1876 | flush_workqueue(dev_priv->wq); |
1881 | #endif | | 1877 | #endif |
1882 | | | 1878 | |
1883 | mutex_lock(&dev->struct_mutex); | | 1879 | mutex_lock(&dev->struct_mutex); |
1884 | i915_gem_free_all_phys_object(dev); | | 1880 | i915_gem_free_all_phys_object(dev); |
1885 | i915_gem_cleanup_ringbuffer(dev); | | 1881 | i915_gem_cleanup_ringbuffer(dev); |
1886 | i915_gem_context_fini(dev); | | 1882 | i915_gem_context_fini(dev); |
1887 | mutex_unlock(&dev->struct_mutex); | | 1883 | mutex_unlock(&dev->struct_mutex); |
1888 | i915_gem_cleanup_aliasing_ppgtt(dev); | | 1884 | i915_gem_cleanup_aliasing_ppgtt(dev); |
1889 | i915_gem_cleanup_stolen(dev); | | 1885 | i915_gem_cleanup_stolen(dev); |
1890 | drm_mm_takedown(&dev_priv->mm.stolen); | | 1886 | drm_mm_takedown(&dev_priv->mm.stolen); |
1891 | | | 1887 | |
1892 | intel_cleanup_overlay(dev); | | 1888 | intel_cleanup_overlay(dev); |
1893 | | | 1889 | |
1894 | if (!I915_NEED_GFX_HWS(dev)) | | 1890 | if (!I915_NEED_GFX_HWS(dev)) |
1895 | i915_free_hws(dev); | | 1891 | i915_free_hws(dev); |
1896 | } | | 1892 | } |
1897 | | | 1893 | |
1898 | #ifdef __NetBSD__ | | 1894 | #ifdef __NetBSD__ |
1899 | if (dev_priv->regs_map != NULL) | | 1895 | if (dev_priv->regs_map != NULL) |
1900 | (void)drm_rmmap(dev, dev_priv->regs_map); | | 1896 | (void)drm_rmmap(dev, dev_priv->regs_map); |
1901 | #else | | 1897 | #else |
1902 | if (dev_priv->regs != NULL) | | 1898 | if (dev_priv->regs != NULL) |
1903 | pci_iounmap(dev->pdev, dev_priv->regs); | | 1899 | pci_iounmap(dev->pdev, dev_priv->regs); |
1904 | #endif | | 1900 | #endif |
1905 | | | 1901 | |
1906 | intel_teardown_gmbus(dev); | | 1902 | intel_teardown_gmbus(dev); |
1907 | intel_teardown_mchbar(dev); | | 1903 | intel_teardown_mchbar(dev); |
1908 | | | 1904 | |
1909 | destroy_workqueue(dev_priv->wq); | | 1905 | destroy_workqueue(dev_priv->wq); |
1910 | | | 1906 | |
1911 | pci_dev_put(dev_priv->bridge_dev); | | 1907 | pci_dev_put(dev_priv->bridge_dev); |
1912 | kfree(dev->dev_private); | | 1908 | kfree(dev->dev_private); |
1913 | | | 1909 | |
1914 | return 0; | | 1910 | return 0; |
1915 | } | | 1911 | } |
1916 | | | 1912 | |
1917 | int i915_driver_open(struct drm_device *dev, struct drm_file *file) | | 1913 | int i915_driver_open(struct drm_device *dev, struct drm_file *file) |
1918 | { | | 1914 | { |
1919 | struct drm_i915_file_private *file_priv; | | 1915 | struct drm_i915_file_private *file_priv; |
1920 | | | 1916 | |
1921 | DRM_DEBUG_DRIVER("\n"); | | 1917 | DRM_DEBUG_DRIVER("\n"); |
1922 | file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL); | | 1918 | file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL); |
1923 | if (!file_priv) | | 1919 | if (!file_priv) |
1924 | return -ENOMEM; | | 1920 | return -ENOMEM; |
1925 | | | 1921 | |
1926 | file->driver_priv = file_priv; | | 1922 | file->driver_priv = file_priv; |
1927 | | | 1923 | |
1928 | spin_lock_init(&file_priv->mm.lock); | | 1924 | spin_lock_init(&file_priv->mm.lock); |
1929 | INIT_LIST_HEAD(&file_priv->mm.request_list); | | 1925 | INIT_LIST_HEAD(&file_priv->mm.request_list); |
1930 | | | 1926 | |
1931 | idr_init(&file_priv->context_idr); | | 1927 | idr_init(&file_priv->context_idr); |
1932 | | | 1928 | |
1933 | return 0; | | 1929 | return 0; |
1934 | } | | 1930 | } |
1935 | | | 1931 | |
1936 | /** | | 1932 | /** |
1937 | * i915_driver_lastclose - clean up after all DRM clients have exited | | 1933 | * i915_driver_lastclose - clean up after all DRM clients have exited |
1938 | * @dev: DRM device | | 1934 | * @dev: DRM device |
1939 | * | | 1935 | * |
1940 | * Take care of cleaning up after all DRM clients have exited. In the | | 1936 | * Take care of cleaning up after all DRM clients have exited. In the |
1941 | * mode setting case, we want to restore the kernel's initial mode (just | | 1937 | * mode setting case, we want to restore the kernel's initial mode (just |
1942 | * in case the last client left us in a bad state). | | 1938 | * in case the last client left us in a bad state). |
1943 | * | | 1939 | * |
1944 | * Additionally, in the non-mode setting case, we'll tear down the GTT | | 1940 | * Additionally, in the non-mode setting case, we'll tear down the GTT |
1945 | * and DMA structures, since the kernel won't be using them, and clean | | 1941 | * and DMA structures, since the kernel won't be using them, and clean |
1946 | * up any GEM state. | | 1942 | * up any GEM state. |
1947 | */ | | 1943 | */ |
1948 | void i915_driver_lastclose(struct drm_device * dev) | | 1944 | void i915_driver_lastclose(struct drm_device * dev) |
1949 | { | | 1945 | { |
1950 | drm_i915_private_t *dev_priv = dev->dev_private; | | 1946 | drm_i915_private_t *dev_priv = dev->dev_private; |
1951 | | | 1947 | |
1952 | /* On gen6+ we refuse to init without kms enabled, but then the drm core | | 1948 | /* On gen6+ we refuse to init without kms enabled, but then the drm core |
1953 | * goes right around and calls lastclose. Check for this and don't clean | | 1949 | * goes right around and calls lastclose. Check for this and don't clean |
1954 | * up anything. */ | | 1950 | * up anything. */ |
1955 | if (!dev_priv) | | 1951 | if (!dev_priv) |
1956 | return; | | 1952 | return; |
1957 | | | 1953 | |
1958 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | | 1954 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
1959 | intel_fb_restore_mode(dev); | | 1955 | intel_fb_restore_mode(dev); |
1960 | #ifndef __NetBSD__ /* XXX vga */ | | 1956 | #ifndef __NetBSD__ /* XXX vga */ |
1961 | vga_switcheroo_process_delayed_switch(); | | 1957 | vga_switcheroo_process_delayed_switch(); |
1962 | #endif | | 1958 | #endif |
1963 | return; | | 1959 | return; |
1964 | } | | 1960 | } |
1965 | | | 1961 | |
1966 | i915_gem_lastclose(dev); | | 1962 | i915_gem_lastclose(dev); |
1967 | | | 1963 | |
1968 | i915_dma_cleanup(dev); | | 1964 | i915_dma_cleanup(dev); |
1969 | } | | 1965 | } |
1970 | | | 1966 | |
1971 | void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) | | 1967 | void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) |
1972 | { | | 1968 | { |
1973 | i915_gem_context_close(dev, file_priv); | | 1969 | i915_gem_context_close(dev, file_priv); |
1974 | i915_gem_release(dev, file_priv); | | 1970 | i915_gem_release(dev, file_priv); |
1975 | } | | 1971 | } |
1976 | | | 1972 | |
1977 | void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) | | 1973 | void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) |
1978 | { | | 1974 | { |
1979 | struct drm_i915_file_private *file_priv = file->driver_priv; | | 1975 | struct drm_i915_file_private *file_priv = file->driver_priv; |
1980 | | | 1976 | |
1981 | kfree(file_priv); | | 1977 | kfree(file_priv); |
1982 | } | | 1978 | } |
1983 | | | 1979 | |
1984 | struct drm_ioctl_desc i915_ioctls[] = { | | 1980 | struct drm_ioctl_desc i915_ioctls[] = { |
1985 | DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | | 1981 | DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
1986 | DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH), | | 1982 | DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH), |
1987 | DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH), | | 1983 | DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH), |
1988 | DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH), | | 1984 | DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH), |
1989 | DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH), | | 1985 | DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH), |
1990 | DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH), | | 1986 | DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH), |
1991 | DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH), | | 1987 | DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH), |
1992 | DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | | 1988 | DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
1993 | DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH), | | 1989 | DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH), |
1994 | DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH), | | 1990 | DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH), |
1995 | DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | | 1991 | DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
1996 | DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH), | | 1992 | DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH), |
1997 | DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | | 1993 | DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
1998 | DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | | 1994 | DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
1999 | DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH), | | 1995 | DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH), |
2000 | DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH), | | 1996 | DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH), |
2001 | DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | | 1997 | DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
2002 | DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), | | 1998 | DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), |
2003 | DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED), | | 1999 | DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED), |
2004 | DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED), | | 2000 | DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED), |
2005 | DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), | | 2001 | DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), |
2006 | DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), | | 2002 | DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), |
2007 | DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED), | | 2003 | DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED), |
2008 | DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED), | | 2004 | DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED), |
2009 | DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED), | | 2005 | DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED), |
2010 | DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED), | | 2006 | DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED), |
2011 | DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), | | 2007 | DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), |
2012 | DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), | | 2008 | DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), |
2013 | DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED), | | 2009 | DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED), |
2014 | DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED), | | 2010 | DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED), |
2015 | DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED), | | 2011 | DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED), |
2016 | DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED), | | 2012 | DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED), |
2017 | DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED), | | 2013 | DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED), |
2018 | DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED), | | 2014 | DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED), |
2019 | DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED), | | 2015 | DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED), |
2020 | DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED), | | 2016 | DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED), |
2021 | DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED), | | 2017 | DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED), |
2022 | DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED), | | 2018 | DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED), |
2023 | DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED), | | 2019 | DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED), |
2024 | DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED), | | 2020 | DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED), |
2025 | DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), | | 2021 | DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), |
2026 | DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), | | 2022 | DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), |
2027 | DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), | | 2023 | DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), |
2028 | DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), | | 2024 | DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), |
2029 | DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED), | | 2025 | DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED), |
2030 | DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED), | | 2026 | DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED), |
2031 | DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED), | | 2027 | DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED), |
2032 | DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED), | | 2028 | DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED), |
2033 | }; | | 2029 | }; |
2034 | | | 2030 | |
2035 | int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); | | 2031 | int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); |
2036 | | | 2032 | |
2037 | /* | | 2033 | /* |
2038 | * This is really ugly: Because old userspace abused the linux agp interface to | | 2034 | * This is really ugly: Because old userspace abused the linux agp interface to |
2039 | * manage the gtt, we need to claim that all intel devices are agp, since | | 2035 | * manage the gtt, we need to claim that all intel devices are agp, since |
2040 | * otherwise the drm core refuses to initialize the agp support code. | | 2036 | * otherwise the drm core refuses to initialize the agp support code. |
2041 | */ | | 2037 | */ |
2042 | int i915_driver_device_is_agp(struct drm_device * dev) | | 2038 | int i915_driver_device_is_agp(struct drm_device * dev) |
2043 | { | | 2039 | { |
2044 | return 1; | | 2040 | return 1; |
2045 | } | | 2041 | } |