Sun Dec 19 01:38:51 2021 UTC
Tell i915_gem_internal.c the good news about bus_dma.


(riastradh)
diff -r1.2 -r1.3 src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_internal.c
diff -r1.2 -r1.3 src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_object_types.h
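
The change below replaces the Linux sg_table path in i915_gem_internal.c
with NetBSD's bus_dma(9) under __NetBSD__.  For readers new to bus_dma,
here is a minimal sketch of the same allocate/create/load sequence.  The
names (alloc_dma_pages, tag, mapp, rsegsp) are hypothetical, not from the
tree; errors unwind in reverse order, just like the out0..out3 labels in
the patch.

#include <sys/param.h>
#include <sys/bus.h>

/*
 * Sketch only: allocate wired, DMA-safe pages and load them into a
 * DMA map, the three bus_dma(9) steps that
 * i915_gem_object_get_pages_internal() performs below.
 */
static int
alloc_dma_pages(bus_dma_tag_t tag, bus_size_t size,
    bus_dma_segment_t *segs, int nsegs, int *rsegsp, bus_dmamap_t *mapp)
{
        int error;

        /* 1. Allocate physical segments, page-aligned, no boundary. */
        error = bus_dmamem_alloc(tag, size, PAGE_SIZE, 0, segs, nsegs,
            rsegsp, BUS_DMA_NOWAIT);
        if (error)
                return error;

        /* 2. Create a map wide enough for the segments we were given. */
        error = bus_dmamap_create(tag, size, *rsegsp, size, 0,
            BUS_DMA_NOWAIT, mapp);
        if (error)
                goto fail_free;

        /* 3. Load the raw segments into the map. */
        error = bus_dmamap_load_raw(tag, *mapp, segs, *rsegsp, size,
            BUS_DMA_NOWAIT);
        if (error)
                goto fail_destroy;

        return 0;       /* (*mapp)->dm_segs[] now holds bus addresses */

fail_destroy:
        bus_dmamap_destroy(tag, *mapp);
fail_free:
        bus_dmamem_free(tag, segs, *rsegsp);
        return error;
}

Note that bus_dmamap_load_raw() takes raw segments, so the pages never
need a kernel virtual mapping; the device reaches them through the bus
addresses recorded in the map.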

cvs diff -r1.2 -r1.3 src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_internal.c

--- src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_internal.c 2021/12/18 23:45:30 1.2
+++ src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_internal.c 2021/12/19 01:38:51 1.3
@@ -1,55 +1,110 @@
-/* $NetBSD: i915_gem_internal.c,v 1.2 2021/12/18 23:45:30 riastradh Exp $ */
+/* $NetBSD: i915_gem_internal.c,v 1.3 2021/12/19 01:38:51 riastradh Exp $ */
 
 /*
  * SPDX-License-Identifier: MIT
  *
  * Copyright © 2014-2016 Intel Corporation
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: i915_gem_internal.c,v 1.2 2021/12/18 23:45:30 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: i915_gem_internal.c,v 1.3 2021/12/19 01:38:51 riastradh Exp $");
 
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
 #include <linux/swiotlb.h>
 
 #include <drm/i915_drm.h>
 
 #include "i915_drv.h"
 #include "i915_gem.h"
 #include "i915_gem_object.h"
 #include "i915_scatterlist.h"
 #include "i915_utils.h"
 
+#ifndef __NetBSD__
 #define QUIET (__GFP_NORETRY | __GFP_NOWARN)
 #define MAYFAIL (__GFP_RETRY_MAYFAIL | __GFP_NOWARN)
 
 static void internal_free_pages(struct sg_table *st)
 {
         struct scatterlist *sg;
 
         for (sg = st->sgl; sg; sg = __sg_next(sg)) {
                 if (sg_page(sg))
                         __free_pages(sg_page(sg), get_order(sg->length));
         }
 
         sg_free_table(st);
         kfree(st);
 }
+#endif
 
 static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
 {
         struct drm_i915_private *i915 = to_i915(obj->base.dev);
+#ifdef __NetBSD__
+        bus_dma_tag_t dmat = i915->drm.dmat;
+        bus_dmamap_t map;
+        size_t nsegs;
+        unsigned sizes, seg;
+        int ret;
+
+        KASSERT(obj->mm.segs == NULL);
+        nsegs = obj->mm.nsegs = obj->base.size/PAGE_SIZE;
+        if (nsegs > UINT_MAX || nsegs > SIZE_MAX/sizeof(obj->mm.segs[0]))
+                return -ENOMEM;
+        obj->mm.segs = kmem_alloc(nsegs * sizeof(obj->mm.segs[0]), KM_NOSLEEP);
+        if (obj->mm.segs == NULL)
+                return -ENOMEM;
+
+        /* XXX errno NetBSD->Linux */
+        ret = -bus_dmamem_alloc(dmat, obj->base.size, PAGE_SIZE, 0,
+            obj->mm.segs, nsegs, &obj->mm.rsegs, BUS_DMA_NOWAIT);
+        if (ret)
+                goto out0;
+
+        /* XXX errno NetBSD->Linux */
+        ret = -bus_dmamap_create(dmat, obj->base.size, obj->mm.rsegs,
+            obj->base.size, 0, BUS_DMA_NOWAIT, &map);
+        if (ret)
+                goto out1;
+
+        /* XXX errno NetBSD->Linux */
+        ret = -bus_dmamap_load_raw(dmat, map, obj->mm.segs, obj->mm.rsegs,
+            obj->base.size, BUS_DMA_NOWAIT);
+        if (ret)
+                goto out2;
+
+        ret = i915_gem_gtt_prepare_pages(obj, map);
+        if (ret)
+                goto out3;
+
+        for (sizes = 0, seg = 0; seg < map->dm_nsegs; seg++)
+                sizes |= map->dm_segs[seg].ds_len;
+
+        obj->mm.madv = I915_MADV_DONTNEED;
+        __i915_gem_object_set_pages(obj, map, sizes);
+
+        return 0;
+
+out4: __unused
+        i915_gem_gtt_finish_pages(obj, map);
+out3:   bus_dmamap_unload(dmat, map);
+out2:   bus_dmamap_destroy(dmat, map);
+out1:   bus_dmamem_free(dmat, obj->mm.segs, obj->mm.rsegs);
+out0:   kmem_free(obj->mm.segs, nsegs * sizeof(obj->mm.segs[0]));
+        return ret;
+#else
         struct sg_table *st;
         struct scatterlist *sg;
         unsigned int sg_page_sizes;
         unsigned int npages;
         int max_order;
         gfp_t gfp;
 
         max_order = MAX_ORDER;
 #ifdef CONFIG_SWIOTLB
         if (swiotlb_nr_tbl()) {
                 unsigned int max_segment;
 
                 max_segment = swiotlb_max_segment();
@@ -122,33 +177,50 @@ create_st:
                 goto err;
         }
 
         __i915_gem_object_set_pages(obj, st, sg_page_sizes);
 
         return 0;
 
 err:
         sg_set_page(sg, NULL, 0, 0);
         sg_mark_end(sg);
         internal_free_pages(st);
 
         return -ENOMEM;
+#endif
 }
 
+#ifdef __NetBSD__
+static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
+    bus_dmamap_t pages)
+#else
 static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
                                                struct sg_table *pages)
+#endif
 {
         i915_gem_gtt_finish_pages(obj, pages);
+#ifdef __NetBSD__
+        bus_dma_tag_t dmat = obj->base.dev->dmat;
+        bus_dmamap_unload(dmat, pages);
+        bus_dmamap_destroy(dmat, pages);
+        bus_dmamem_free(dmat, obj->mm.segs, obj->mm.rsegs);
+        obj->mm.rsegs = 0;
+        kmem_free(obj->mm.segs, obj->mm.nsegs * sizeof(obj->mm.segs[0]));
+        obj->mm.segs = NULL;
+        obj->mm.nsegs = 0;
+#else
         internal_free_pages(pages);
+#endif
 
         obj->mm.dirty = false;
 }
 
 static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
         .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
                  I915_GEM_OBJECT_IS_SHRINKABLE,
         .get_pages = i915_gem_object_get_pages_internal,
         .put_pages = i915_gem_object_put_pages_internal,
 };
 
 /**
  * i915_gem_object_create_internal: create an object with volatile pages
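
The header change below adds the NetBSD-side fields that the code above
uses: segs/rsegs/nsegs for the raw segment list, plus a bus_dmamap_t that
reuses the name "pages" (the "expedient misnomer") so shared i915 code
that passes obj->mm.pages around needs no edits.  The matching teardown,
sketched with the same hypothetical names as above, releases each
resource in strictly reverse order:

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kmem.h>

/*
 * Sketch only: undo every step of alloc_dma_pages() in reverse order,
 * mirroring i915_gem_object_put_pages_internal() above.  The segs
 * array is assumed to have been kmem_alloc'd, as in the patch.
 */
static void
free_dma_pages(bus_dma_tag_t tag, bus_dmamap_t map,
    bus_dma_segment_t *segs, int rsegs, size_t nsegs)
{
        bus_dmamap_unload(tag, map);        /* undo bus_dmamap_load_raw */
        bus_dmamap_destroy(tag, map);       /* undo bus_dmamap_create */
        bus_dmamem_free(tag, segs, rsegs);  /* undo bus_dmamem_alloc */
        kmem_free(segs, nsegs * sizeof(segs[0])); /* undo kmem_alloc */
}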

cvs diff -r1.2 -r1.3 src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_object_types.h

--- src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_object_types.h 2021/12/18 23:45:30 1.2
+++ src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_object_types.h 2021/12/19 01:38:51 1.3
@@ -1,14 +1,14 @@
-/* $NetBSD: i915_gem_object_types.h,v 1.2 2021/12/18 23:45:30 riastradh Exp $ */
+/* $NetBSD: i915_gem_object_types.h,v 1.3 2021/12/19 01:38:51 riastradh Exp $ */
 
 /*
  * SPDX-License-Identifier: MIT
  *
  * Copyright © 2016 Intel Corporation
  */
 
 #ifndef __I915_GEM_OBJECT_TYPES_H__
 #define __I915_GEM_OBJECT_TYPES_H__
 
 #include <drm/drm_gem.h>
 #include <uapi/drm/i915_drm.h>
 
@@ -198,27 +198,36 @@ struct drm_i915_gem_object {
          */
         struct intel_memory_region *region;
         /**
          * List of memory region blocks allocated for this object.
          */
         struct list_head blocks;
         /**
          * Element within memory_region->objects or region->purgeable
          * if the object is marked as DONTNEED. Access is protected by
          * region->obj_lock.
          */
         struct list_head region_link;
 
+#ifdef __NetBSD__
+        struct pglist pageq;    /* wired pages of normal objects */
+        struct sg_table *sg;    /* drm prime objects */
+        bus_dma_segment_t *segs;/* internal objects */
+        unsigned nsegs;
+        int rsegs;
+        bus_dmamap_t pages;     /* expedient misnomer */
+#else
         struct sg_table *pages;
+#endif
         void *mapping;
 
         struct i915_page_sizes {
                 /**
                  * The sg mask of the pages sg_table. i.e the mask of
                  * of the lengths for each sg entry.
                  */
                 unsigned int phys;
 
                 /**
                  * The gtt page sizes we are allowed to use given the
                  * sg mask and the supported page sizes. This will
                  * express the smallest unit we can use for the whole