@@ -1,23 +1,23 @@
-/*      $NetBSD: i915_gem_mman.c,v 1.8 2021/12/19 11:56:52 riastradh Exp $      */
+/*      $NetBSD: i915_gem_mman.c,v 1.9 2021/12/19 11:57:42 riastradh Exp $      */
 
 /*
  * SPDX-License-Identifier: MIT
  *
  * Copyright © 2014-2016 Intel Corporation
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: i915_gem_mman.c,v 1.8 2021/12/19 11:56:52 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: i915_gem_mman.c,v 1.9 2021/12/19 11:57:42 riastradh Exp $");
 
 #include <linux/anon_inodes.h>
 #include <linux/mman.h>
 #include <linux/pfn_t.h>
 #include <linux/sizes.h>
 
 #include "gt/intel_gt.h"
 #include "gt/intel_gt_requests.h"
 
 #include "i915_drv.h"
 #include "i915_gem_gtt.h"
 #include "i915_gem_ioctls.h"
 #include "i915_gem_object.h"
@@ -229,36 +229,42 @@ compute_partial_view(const struct drm_i9
         view.partial.offset = rounddown(page_offset, chunk);
         view.partial.size =
                 min_t(unsigned int, chunk,
                       (obj->base.size >> PAGE_SHIFT) - view.partial.offset);
 
         /* If the partial covers the entire object, just create a normal VMA. */
         if (chunk >= obj->base.size >> PAGE_SHIFT)
                 view.type = I915_GGTT_VIEW_NORMAL;
 
         return view;
 }
 
 #ifdef __NetBSD__
+/*
+ * XXX pmap_enter_default instead of pmap_enter because of a problem
+ * with using weak aliases in kernel modules.
+ *
+ * XXX This probably won't work in a Xen kernel!  Maybe this should be
+ * #ifdef _MODULE?
+ */
+int pmap_enter_default(pmap_t, vaddr_t, paddr_t, vm_prot_t, unsigned);
+#define pmap_enter pmap_enter_default
 
 static int
-i915_gem_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, struct vm_page **pps,
-    int npages, int centeridx, vm_prot_t access_type, int flags)
+i915_error_to_vmf_fault(int err)
 {
-        panic("NYI");
+        return err;
 }
-
 #else
-
 static vm_fault_t i915_error_to_vmf_fault(int err)
 {
         switch (err) {
         default:
                 WARN_ONCE(err, "unhandled error in %s: %i\n", __func__, err);
                 /* fallthrough */
         case -EIO: /* shmemfs failure from swap device */
         case -EFAULT: /* purged object */
         case -ENODEV: /* bad object, how did you get here! */
         case -ENXIO: /* unable to access backing store (on device) */
                 return VM_FAULT_SIGBUS;
 
         case -ENOSPC: /* shmemfs allocation failure */
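
The pmap_enter_default declaration above works around NetBSD's weak-alias linkage: on x86 the kernel is believed to export pmap_enter as a weak alias for pmap_enter_default (so a Xen pmap can supply its own strong pmap_enter), and the XXX comment suggests the in-kernel module loader does not resolve that alias reliably. A minimal userland sketch of the mechanism, with the hypothetical names enter/enter_default standing in for the pmap symbols:

        #include <sys/cdefs.h>
        #include <stdio.h>

        /* Strong symbol: the default implementation. */
        int
        enter_default(int x)
        {
                return x + 1;
        }

        /* Weak alias: calls to enter() bind here unless some other
         * object supplies a strong enter(), as a Xen pmap would. */
        __weak_alias(enter, enter_default)
        int enter(int);

        int
        main(void)
        {
                printf("%d\n", enter(41));      /* prints 42 */
                return 0;
        }

Calling the strong symbol directly, as the #define does, sidesteps the alias entirely; the cost, as the comment notes, is that a Xen kernel's substitute pmap_enter would be bypassed.
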
@@ -267,89 +273,154 @@ static vm_fault_t i915_error_to_vmf_faul
 
         case 0:
         case -EAGAIN:
         case -ERESTARTSYS:
         case -EINTR:
         case -EBUSY:
                 /*
                  * EBUSY is ok: this just means that another thread
                  * already did the job.
                  */
                 return VM_FAULT_NOPAGE;
         }
 }
+#endif
 
+#ifdef __NetBSD__
+static int
+vm_fault_cpu(struct uvm_faultinfo *ufi, struct i915_mmap_offset *mmo,
+    vaddr_t vaddr, struct vm_page **pps, int npages, int centeridx, int flags)
+#else
 static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
+#endif
 {
+#ifndef __NetBSD__
         struct vm_area_struct *area = vmf->vma;
         struct i915_mmap_offset *mmo = area->vm_private_data;
+#endif
         struct drm_i915_gem_object *obj = mmo->obj;
+#ifdef __NetBSD__
+        bool write = ufi->entry->protection & VM_PROT_WRITE;
+#else
+        bool write = area->vm_flags & VM_WRITE;
+#endif
         resource_size_t iomap;
         int err;
 
         /* Sanity check that we allow writing into this object */
-        if (unlikely(i915_gem_object_is_readonly(obj) &&
-                     area->vm_flags & VM_WRITE))
+        if (unlikely(i915_gem_object_is_readonly(obj) && write))
+#ifdef __NetBSD__
+                return -EFAULT;
+#else
                 return VM_FAULT_SIGBUS;
+#endif
 
         err = i915_gem_object_pin_pages(obj);
         if (err)
                 goto out;
 
         iomap = -1;
         if (!i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_STRUCT_PAGE)) {
                 iomap = obj->mm.region->iomap.base;
                 iomap -= obj->mm.region->region.start;
         }
 
         /* PTEs are revoked in obj->ops->put_pages() */
+#ifdef __NetBSD__
+        /* XXX No lmem supported yet.  */
+        KASSERT(i915_gem_object_type_has(obj,
+                I915_GEM_OBJECT_HAS_STRUCT_PAGE));
+
+        struct scatterlist *sg = obj->mm.pages->sgl;
+        unsigned startpage = (ufi->entry->offset + (vaddr - ufi->entry->start))
+            >> PAGE_SHIFT;
+        paddr_t paddr;
+        int i;
+
+        for (i = 0; i < npages; i++) {
+                if ((flags & PGO_ALLPAGES) == 0 && i != centeridx)
+                        continue;
+                if (pps[i] == PGO_DONTCARE)
+                        continue;
+                paddr = page_to_phys(sg->sg_pgs[startpage + i]);
+                /* XXX errno NetBSD->Linux */
+                err = -pmap_enter(ufi->orig_map->pmap,
+                    vaddr + i*PAGE_SIZE, paddr, ufi->entry->protection,
+                    PMAP_CANFAIL | ufi->entry->protection);
+                if (err)
+                        break;
+        }
+        pmap_update(ufi->orig_map->pmap);
+#else
         err = remap_io_sg(area,
                           area->vm_start, area->vm_end - area->vm_start,
                           obj->mm.pages->sgl, iomap);
+#endif
 
-        if (area->vm_flags & VM_WRITE) {
+        if (write) {
                 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
                 obj->mm.dirty = true;
         }
 
         i915_gem_object_unpin_pages(obj);
 
 out:
         return i915_error_to_vmf_fault(err);
 }
 
+#ifdef __NetBSD__
+static int
+vm_fault_gtt(struct uvm_faultinfo *ufi, struct i915_mmap_offset *mmo,
+    vaddr_t vaddr, struct vm_page **pps, int npages, int centeridx, int flags)
+#else
 static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
+#endif
 {
 #define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
+#ifndef __NetBSD__
         struct vm_area_struct *area = vmf->vma;
         struct i915_mmap_offset *mmo = area->vm_private_data;
+#endif
         struct drm_i915_gem_object *obj = mmo->obj;
         struct drm_device *dev = obj->base.dev;
         struct drm_i915_private *i915 = to_i915(dev);
         struct intel_runtime_pm *rpm = &i915->runtime_pm;
         struct i915_ggtt *ggtt = &i915->ggtt;
+#ifdef __NetBSD__
+        bool write = ufi->entry->protection & VM_PROT_WRITE;
+#else
         bool write = area->vm_flags & VM_WRITE;
+#endif
         intel_wakeref_t wakeref;
         struct i915_vma *vma;
         pgoff_t page_offset;
         int srcu;
         int ret;
 
         /* Sanity check that we allow writing into this object */
         if (i915_gem_object_is_readonly(obj) && write)
+#ifdef __NetBSD__
+                return -EFAULT;
+#else
                 return VM_FAULT_SIGBUS;
+#endif
 
+#ifdef __NetBSD__
+        page_offset = (ufi->entry->offset + (vaddr - ufi->entry->start))
+            >> PAGE_SHIFT;
+#else
         /* We don't use vmf->pgoff since that has the fake offset */
         page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
+#endif
 
         trace_i915_gem_object_fault(obj, page_offset, true, write);
 
         ret = i915_gem_object_pin_pages(obj);
         if (ret)
                 goto err;
 
         wakeref = intel_runtime_pm_get(rpm);
 
         ret = intel_gt_reset_trylock(ggtt->vm.gt, &srcu);
         if (ret)
                 goto err_rpm;
 
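
The NetBSD branch of vm_fault_cpu above follows the standard UVM fault-handler shape: compute the page index of the faulting address within the backing object, enter mappings for the pages UVM requested with PMAP_CANFAIL so resource shortage surfaces as an error rather than a panic, then commit with pmap_update. A distilled sketch of that loop (kernel context assumed; backing_paddr is a hypothetical placeholder, not a real UVM API):

        #include <sys/param.h>
        #include <uvm/uvm.h>

        /* Placeholder for however the backing store turns a page index
         * into a physical address (the real code uses page_to_phys on
         * the object's scatterlist). */
        static paddr_t
        backing_paddr(unsigned pgno)
        {
                return (paddr_t)pgno << PAGE_SHIFT;     /* illustration only */
        }

        static int
        fault_loop_sketch(struct uvm_faultinfo *ufi, vaddr_t vaddr,
            struct vm_page **pps, int npages, int centeridx, int flags)
        {
                /* Page index of vaddr in the object: the map entry's base
                 * offset plus how far into the entry the fault landed. */
                unsigned startpage =
                    (ufi->entry->offset + (vaddr - ufi->entry->start))
                    >> PAGE_SHIFT;
                int i, error = 0;

                for (i = 0; i < npages; i++) {
                        /* Map only the faulting page unless UVM asked for
                         * the whole window (PGO_ALLPAGES). */
                        if ((flags & PGO_ALLPAGES) == 0 && i != centeridx)
                                continue;
                        /* Slots the anon (amap) layer already covers. */
                        if (pps[i] == PGO_DONTCARE)
                                continue;
                        error = pmap_enter(ufi->orig_map->pmap,
                            vaddr + i*PAGE_SIZE, backing_paddr(startpage + i),
                            ufi->entry->protection,
                            PMAP_CANFAIL | ufi->entry->protection);
                        if (error)
                                break;
                }
                pmap_update(ufi->orig_map->pmap);       /* commit mappings */
                return error;   /* positive NetBSD errno, 0 on success */
        }

The flags argument to pmap_enter carries the access-type hint (the protection bits) alongside PMAP_CANFAIL, which is why ufi->entry->protection appears twice in the call.
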
@@ -389,31 +460,53 @@ static vm_fault_t vm_fault_gtt(struct vm
         }
 
         /* Access to snoopable pages through the GTT is incoherent. */
         if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(i915)) {
                 ret = -EFAULT;
                 goto err_unpin;
         }
 
         ret = i915_vma_pin_fence(vma);
         if (ret)
                 goto err_unpin;
 
         /* Finally, remap it using the new GTT offset */
+#ifdef __NetBSD__
+        unsigned startpage = page_offset;
+        paddr_t paddr;
+        int i;
+
+        for (i = 0; i < npages; i++) {
+                if ((flags & PGO_ALLPAGES) == 0 && i != centeridx)
+                        continue;
+                if (pps[i] == PGO_DONTCARE)
+                        continue;
+                paddr = ggtt->gmadr.start + vma->node.start
+                    + (startpage + i)*PAGE_SIZE;
+                /* XXX errno NetBSD->Linux */
+                ret = -pmap_enter(ufi->orig_map->pmap,
+                    vaddr + i*PAGE_SIZE, paddr, ufi->entry->protection,
+                    PMAP_CANFAIL | ufi->entry->protection);
+                if (ret)
+                        break;
+        }
+        pmap_update(ufi->orig_map->pmap);
+#else
         ret = remap_io_mapping(area,
                 area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
                 (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
                 min_t(u64, vma->size, area->vm_end - area->vm_start),
                 &ggtt->iomap);
+#endif
         if (ret)
                 goto err_fence;
 
         assert_rpm_wakelock_held(rpm);
 
         /* Mark as being mmapped into userspace for later revocation */
         mutex_lock(&i915->ggtt.vm.mutex);
         if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
                 list_add(&obj->userfault_link, &i915->ggtt.userfault_list);
         mutex_unlock(&i915->ggtt.vm.mutex);
 
         /* Track the mmo associated with the fenced vma */
         vma->mmo = mmo;
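
In the GTT branch just above, the CPU never touches the object's backing pages directly; it goes through the mappable aperture, so the physical address handed to pmap_enter is the aperture base plus the vma's offset within the GGTT plus the page offset. A worked example of the computation with illustrative numbers (not real hardware values):

        paddr_t gmadr_start = 0xe0000000;  /* CPU-visible aperture base (PCI BAR) */
        paddr_t node_start  = 0x00100000;  /* this vma's offset inside the GGTT */
        unsigned pgno       = 3;           /* page being entered */
        paddr_t paddr = gmadr_start + node_start + pgno * PAGE_SIZE;
        /* => 0xe0103000: an aperture address that the GTT redirects to the
         * object's pages, with the fence programmed above applying tiling. */

Unlike the Linux path, which remaps the whole partial view in one remap_io_mapping call, the NetBSD loop enters only the pages UVM asked for.
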
@@ -431,27 +524,114 @@ static vm_fault_t vm_fault_gtt(struct vm
 err_fence:
         i915_vma_unpin_fence(vma);
 err_unpin:
         __i915_vma_unpin(vma);
 err_reset:
         intel_gt_reset_unlock(ggtt->vm.gt, srcu);
 err_rpm:
         intel_runtime_pm_put(rpm, wakeref);
         i915_gem_object_unpin_pages(obj);
 err:
         return i915_error_to_vmf_fault(ret);
 }
 
-#endif /* __NetBSD__ */
+#ifdef __NetBSD__
+
+static int
+i915_gem_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, struct vm_page **pps,
+    int npages, int centeridx, vm_prot_t access_type, int flags)
+{
+        struct uvm_object *uobj = ufi->entry->object.uvm_obj;
+        struct drm_gem_object *gem =
+            container_of(uobj, struct drm_gem_object, gemo_uvmobj);
+        struct drm_i915_gem_object *obj = to_intel_bo(gem);
+        struct drm_device *dev = obj->base.dev;
+        struct drm_vma_offset_node *node;
+        struct i915_mmap_offset *mmo;
+        bool pinned = false;
+        int error;
+
+        KASSERT(rw_lock_held(obj->base.filp->vmobjlock));
+        KASSERT(!i915_gem_object_is_readonly(obj) ||
+            (access_type & VM_PROT_WRITE) == 0);
+        KASSERT(i915_gem_object_type_has(obj,
+            I915_GEM_OBJECT_HAS_STRUCT_PAGE|I915_GEM_OBJECT_HAS_IOMEM));
+
+        /* Actually we don't support iomem right now!  */
+        KASSERT(i915_gem_object_type_has(obj,
+            I915_GEM_OBJECT_HAS_STRUCT_PAGE));
+
+        /*
+         * Look up the mmo again because we can't conveniently store it
+         * alongside the mapping unless we create a separate uvm object
+         * for it.  XXX Consider creating a separate uvm object as a
+         * kind of subobject of the main object.
+         */
+        rcu_read_lock();
+        drm_vma_offset_lock_lookup(dev->vma_offset_manager);
+        node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
+            ufi->entry->start >> PAGE_SHIFT,
+            (ufi->entry->end - ufi->entry->start) >> PAGE_SHIFT);
+        drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
+        rcu_read_unlock();
+
+        /*
+         * The mmo had better be there -- hope we can't remove the mmo
+         * without unmapping first!
+         */
+        KASSERT(node);
+        mmo = container_of(node, struct i915_mmap_offset, vma_node);
+        KASSERT(obj == mmo->obj);
+
+        /* XXX errno Linux->NetBSD */
+        error = -i915_gem_object_pin_pages(obj);
+        if (error)
+                goto out;
+        pinned = true;
+
+        switch (mmo->mmap_type) {
+        case I915_MMAP_TYPE_WC:
+        case I915_MMAP_TYPE_WB:
+        case I915_MMAP_TYPE_UC:
+                /* XXX errno Linux->NetBSD */
+                error = -vm_fault_cpu(ufi, mmo, vaddr, pps, npages, centeridx,
+                    flags);
+                break;
+        case I915_MMAP_TYPE_GTT:
+                error = -vm_fault_gtt(ufi, mmo, vaddr, pps, npages, centeridx,
+                    flags);
+                break;
+        default:
+                panic("invalid i915 gem mmap offset type: %d",
+                    mmo->mmap_type);
+        }
+
+out:    if (pinned)
+                i915_gem_object_unpin_pages(obj);
+        uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
+
+        /*
+         * Remap EINTR to success, so that we return to userland.
+         * On the way out, we'll deliver the signal, and if the signal
+         * is not fatal then the user code which faulted will most likely
+         * fault again, and we'll come back here for another try.
+         */
+        if (error == EINTR)
+                error = 0;
+
+        return error;
+}
+
+#endif
 
 void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
 {
         struct i915_vma *vma;
 
         GEM_BUG_ON(!obj->userfault_count);
 
         for_each_ggtt_vma(vma, obj)
                 i915_vma_revoke_mmap(vma);
 
         GEM_BUG_ON(obj->userfault_count);
 }
 
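
A recurring detail in the NetBSD additions is the pair of /* XXX errno ... */ negations: Linux-style helpers return 0 or a negative errno, while UVM fault handlers return 0 or a positive errno, so every call across the boundary is negated. The final EINTR remap then turns an interrupted wait into a silent retry: returning 0 sends the thread back to userland, the pending signal is delivered, and if it was not fatal the faulting instruction re-executes and re-enters the handler. A small userland illustration of the convention (linux_style_pin is a made-up stand-in, not a real API):

        #include <errno.h>
        #include <stdio.h>

        /* Made-up Linux-style helper: 0 on success, negative errno. */
        static int
        linux_style_pin(int interrupted)
        {
                return interrupted ? -EINTR : 0;
        }

        int
        main(void)
        {
                /* NetBSD-style caller: negate to a positive errno. */
                int error = -linux_style_pin(1);

                /* i915_gem_fault's remap: EINTR becomes "retry". */
                if (error == EINTR)
                        error = 0;
                printf("error = %d\n", error);  /* error = 0 */
                return 0;
        }
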