Tue Sep 13 10:14:43 2022 UTC
i915: Fix error branches of execbuffer vma pinning.

PR kern/56591


(riastradh)
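
The two hunks below share one idea: once an error is detected, every pointer the
execbuffer keeps to the affected vma must be cleared. In eb_add_vma, a failed pin
now also clears eb->vma[i], and eb->batch as well when the failed entry is the
batch buffer; in eb_move_to_gpu, eb->vma[i] is cleared after the reservation is
released whenever err is set. Otherwise, later cleanup would walk the table and
operate on a vma whose reservation was already dropped.

What follows is a minimal, self-contained sketch of that pattern, not the
driver's actual code; resource, table, pin, and unpin are hypothetical stand-ins
for the vma machinery. The point is that a slot pointer published before a pin
attempt must be retracted, along with every alias of it, the moment the pin
fails, so a generic unwind pass keyed on NULL checks stays safe.

	/*
	 * Hypothetical sketch of the clear-on-error pattern; these are
	 * stand-in names, not the driver's structures.
	 */
	#include <stdio.h>
	#include <stdlib.h>
	#include <stdbool.h>

	#define NSLOTS 4

	struct resource {
		int id;
		bool pinned;
	};

	struct table {
		struct resource *slot[NSLOTS];
		struct resource *batch;	/* alias of slot[batch_idx] */
	};

	static int
	pin(struct resource *r)
	{
		if (r->id == 2)		/* simulated pin failure */
			return -1;
		r->pinned = true;
		return 0;
	}

	static void
	unpin(struct resource *r)
	{
		if (!r->pinned) {	/* catches the stale-pointer bug */
			fprintf(stderr, "bogus unpin of resource %d\n", r->id);
			abort();
		}
		r->pinned = false;
	}

	static int
	setup(struct table *t, struct resource *r, unsigned batch_idx)
	{
		for (unsigned i = 0; i < NSLOTS; i++) {
			t->slot[i] = &r[i];	/* published before pinning */
			if (i == batch_idx)
				t->batch = t->slot[i];
			if (pin(t->slot[i]) != 0) {
				/*
				 * The fix: retract every published alias
				 * of the failed slot before returning.
				 */
				if (i == batch_idx)
					t->batch = NULL;
				t->slot[i] = NULL;
				return -1;
			}
		}
		return 0;
	}

	int
	main(void)
	{
		struct resource r[NSLOTS] =
		    {{0, false}, {1, false}, {2, false}, {3, false}};
		struct table t = {{NULL}, NULL};

		if (setup(&t, r, 3) != 0) {
			/* Generic unwind: NULL slots were never pinned. */
			for (unsigned i = 0; i < NSLOTS; i++) {
				if (t.slot[i] != NULL)
					unpin(t.slot[i]);
			}
		}
		for (unsigned i = 0; i < NSLOTS; i++)
			printf("resource %d pinned: %d\n", r[i].id, r[i].pinned);
		return 0;
	}

The unwind in main relies only on the NULL checks, which is exactly why clearing
the pointers in the error branch is sufficient: no per-slot bookkeeping beyond
the table itself is needed.
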
cvs diff -r1.4 -r1.5 src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_execbuffer.c

--- src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_execbuffer.c 2021/12/19 11:33:30 1.4
+++ src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_execbuffer.c 2022/09/13 10:14:43 1.5
@@ -1,23 +1,23 @@
-/*	$NetBSD: i915_gem_execbuffer.c,v 1.4 2021/12/19 11:33:30 riastradh Exp $	*/
+/*	$NetBSD: i915_gem_execbuffer.c,v 1.5 2022/09/13 10:14:43 riastradh Exp $	*/
 
 /*
  * SPDX-License-Identifier: MIT
  *
  * Copyright © 2008,2010 Intel Corporation
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: i915_gem_execbuffer.c,v 1.4 2021/12/19 11:33:30 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: i915_gem_execbuffer.c,v 1.5 2022/09/13 10:14:43 riastradh Exp $");
 
 #include <linux/intel-iommu.h>
 #include <linux/dma-resv.h>
 #include <linux/sync_file.h>
 #include <linux/uaccess.h>
 
 #include <drm/drm_syncobj.h>
 #include <drm/i915_drm.h>
 
 #ifdef __NetBSD__
 #include <sys/filedesc.h>
 #endif
 
@@ -536,28 +536,32 @@ eb_add_vma(struct i915_execbuffer *eb,
 
 	err = 0;
 	if (eb_pin_vma(eb, entry, vma)) {
 		if (entry->offset != vma->node.start) {
 			entry->offset = vma->node.start | UPDATE;
 			eb->args->flags |= __EXEC_HAS_RELOC;
 		}
 	} else {
 		eb_unreserve_vma(vma, vma->exec_flags);
 
 		list_add_tail(&vma->exec_link, &eb->unbound);
 		if (drm_mm_node_allocated(&vma->node))
 			err = i915_vma_unbind(vma);
-		if (unlikely(err))
+		if (unlikely(err)) {
 			vma->exec_flags = NULL;
+			if (i == batch_idx)
+				eb->batch = NULL;
+			eb->vma[i] = NULL;
+		}
 	}
 	return err;
 }
 
 static inline int use_cpu_reloc(const struct reloc_cache *cache,
 				const struct drm_i915_gem_object *obj)
 {
 	if (!i915_gem_object_has_struct_page(obj))
 		return false;
 
 	if (DBG_FORCE_RELOC == FORCE_CPU_RELOC)
 		return true;
 
@@ -1896,26 +1900,28 @@ static int eb_move_to_gpu(struct i915_ex
 
 		if (err == 0 && !(flags & EXEC_OBJECT_ASYNC)) {
 			err = i915_request_await_object
 				(eb->request, obj, flags & EXEC_OBJECT_WRITE);
 		}
 
 		if (err == 0)
 			err = i915_vma_move_to_active(vma, eb->request, flags);
 
 		i915_vma_unlock(vma);
 
 		__eb_unreserve_vma(vma, flags);
 		vma->exec_flags = NULL;
+		if (err)
+			eb->vma[i] = NULL;
 
 		if (unlikely(flags & __EXEC_OBJECT_HAS_REF))
 			i915_vma_put(vma);
 	}
 	ww_acquire_fini(&acquire);
 
 	if (unlikely(err))
 		goto err_skip;
 
 	eb->exec = NULL;
 
 	/* Unconditionally flush any chipset caches (for streaming writes). */
 	intel_gt_chipset_flush(eb->engine->gt);