| @@ -1,55 +1,110 @@ | | | @@ -1,55 +1,110 @@ |
1 | /* $NetBSD: i915_gem_internal.c,v 1.2 2021/12/18 23:45:30 riastradh Exp $ */ | | 1 | /* $NetBSD: i915_gem_internal.c,v 1.3 2021/12/19 01:38:51 riastradh Exp $ */ |
2 | | | 2 | |
3 | /* | | 3 | /* |
4 | * SPDX-License-Identifier: MIT | | 4 | * SPDX-License-Identifier: MIT |
5 | * | | 5 | * |
6 | * Copyright © 2014-2016 Intel Corporation | | 6 | * Copyright © 2014-2016 Intel Corporation |
7 | */ | | 7 | */ |
8 | | | 8 | |
9 | #include <sys/cdefs.h> | | 9 | #include <sys/cdefs.h> |
10 | __KERNEL_RCSID(0, "$NetBSD: i915_gem_internal.c,v 1.2 2021/12/18 23:45:30 riastradh Exp $"); | | 10 | __KERNEL_RCSID(0, "$NetBSD: i915_gem_internal.c,v 1.3 2021/12/19 01:38:51 riastradh Exp $"); |
11 | | | 11 | |
12 | #include <linux/scatterlist.h> | | 12 | #include <linux/scatterlist.h> |
13 | #include <linux/slab.h> | | 13 | #include <linux/slab.h> |
14 | #include <linux/swiotlb.h> | | 14 | #include <linux/swiotlb.h> |
15 | | | 15 | |
16 | #include <drm/i915_drm.h> | | 16 | #include <drm/i915_drm.h> |
17 | | | 17 | |
18 | #include "i915_drv.h" | | 18 | #include "i915_drv.h" |
19 | #include "i915_gem.h" | | 19 | #include "i915_gem.h" |
20 | #include "i915_gem_object.h" | | 20 | #include "i915_gem_object.h" |
21 | #include "i915_scatterlist.h" | | 21 | #include "i915_scatterlist.h" |
22 | #include "i915_utils.h" | | 22 | #include "i915_utils.h" |
23 | | | 23 | |
| | | 24 | #ifndef __NetBSD__ |
24 | #define QUIET (__GFP_NORETRY | __GFP_NOWARN) | | 25 | #define QUIET (__GFP_NORETRY | __GFP_NOWARN) |
25 | #define MAYFAIL (__GFP_RETRY_MAYFAIL | __GFP_NOWARN) | | 26 | #define MAYFAIL (__GFP_RETRY_MAYFAIL | __GFP_NOWARN) |
26 | | | 27 | |
27 | static void internal_free_pages(struct sg_table *st) | | 28 | static void internal_free_pages(struct sg_table *st) |
28 | { | | 29 | { |
29 | struct scatterlist *sg; | | 30 | struct scatterlist *sg; |
30 | | | 31 | |
31 | for (sg = st->sgl; sg; sg = __sg_next(sg)) { | | 32 | for (sg = st->sgl; sg; sg = __sg_next(sg)) { |
32 | if (sg_page(sg)) | | 33 | if (sg_page(sg)) |
33 | __free_pages(sg_page(sg), get_order(sg->length)); | | 34 | __free_pages(sg_page(sg), get_order(sg->length)); |
34 | } | | 35 | } |
35 | | | 36 | |
36 | sg_free_table(st); | | 37 | sg_free_table(st); |
37 | kfree(st); | | 38 | kfree(st); |
38 | } | | 39 | } |
| | | 40 | #endif |
39 | | | 41 | |
40 | static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj) | | 42 | static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj) |
41 | { | | 43 | { |
42 | struct drm_i915_private *i915 = to_i915(obj->base.dev); | | 44 | struct drm_i915_private *i915 = to_i915(obj->base.dev); |
| | | 45 | #ifdef __NetBSD__ |
| | | 46 | bus_dma_tag_t dmat = i915->drm.dmat; |
| | | 47 | bus_dmamap_t map; |
| | | 48 | size_t nsegs; |
| | | 49 | unsigned sizes, seg; |
| | | 50 | int ret; |
| | | 51 | |
| | | 52 | KASSERT(obj->mm.segs == NULL); |
| | | 53 | nsegs = obj->mm.nsegs = obj->base.size/PAGE_SIZE; |
| | | 54 | if (nsegs > UINT_MAX || nsegs > SIZE_MAX/sizeof(obj->mm.segs[0])) |
| | | 55 | return -ENOMEM; |
| | | 56 | obj->mm.segs = kmem_alloc(nsegs * sizeof(obj->mm.segs[0]), KM_NOSLEEP); |
| | | 57 | if (obj->mm.segs == NULL) |
| | | 58 | return -ENOMEM; |
| | | 59 | |
| | | 60 | /* XXX errno NetBSD->Linux */ |
| | | 61 | ret = -bus_dmamem_alloc(dmat, obj->base.size, PAGE_SIZE, 0, |
| | | 62 | obj->mm.segs, nsegs, &obj->mm.rsegs, BUS_DMA_NOWAIT); |
| | | 63 | if (ret) |
| | | 64 | goto out0; |
| | | 65 | |
| | | 66 | /* XXX errno NetBSD->Linux */ |
| | | 67 | ret = -bus_dmamap_create(dmat, obj->base.size, obj->mm.rsegs, |
| | | 68 | obj->base.size, 0, BUS_DMA_NOWAIT, &map); |
| | | 69 | if (ret) |
| | | 70 | goto out1; |
| | | 71 | |
| | | 72 | /* XXX errno NetBSD->Linux */ |
| | | 73 | ret = -bus_dmamap_load_raw(dmat, map, obj->mm.segs, obj->mm.rsegs, |
| | | 74 | obj->base.size, BUS_DMA_NOWAIT); |
| | | 75 | if (ret) |
| | | 76 | goto out2; |
| | | 77 | |
| | | 78 | ret = i915_gem_gtt_prepare_pages(obj, map); |
| | | 79 | if (ret) |
| | | 80 | goto out3; |
| | | 81 | |
| | | 82 | for (sizes = 0, seg = 0; seg < map->dm_nsegs; seg++) |
| | | 83 | sizes |= map->dm_segs[seg].ds_len; |
| | | 84 | |
| | | 85 | obj->mm.madv = I915_MADV_DONTNEED; |
| | | 86 | __i915_gem_object_set_pages(obj, map, sizes); |
| | | 87 | |
| | | 88 | return 0; |
| | | 89 | |
| | | 90 | out4: __unused |
| | | 91 | i915_gem_gtt_finish_pages(obj, map); |
 | | | 92 | out3: bus_dmamap_unload(dmat, map); |
 | | | 93 | out2: bus_dmamap_destroy(dmat, map); |
| | | 94 | out1: bus_dmamem_free(dmat, obj->mm.segs, obj->mm.rsegs); |
| | | 95 | out0: kmem_free(obj->mm.segs, nsegs * sizeof(obj->mm.segs[0])); |
| | | 96 | return ret; |
| | | 97 | #else |
43 | struct sg_table *st; | | 98 | struct sg_table *st; |
44 | struct scatterlist *sg; | | 99 | struct scatterlist *sg; |
45 | unsigned int sg_page_sizes; | | 100 | unsigned int sg_page_sizes; |
46 | unsigned int npages; | | 101 | unsigned int npages; |
47 | int max_order; | | 102 | int max_order; |
48 | gfp_t gfp; | | 103 | gfp_t gfp; |
49 | | | 104 | |
50 | max_order = MAX_ORDER; | | 105 | max_order = MAX_ORDER; |
51 | #ifdef CONFIG_SWIOTLB | | 106 | #ifdef CONFIG_SWIOTLB |
52 | if (swiotlb_nr_tbl()) { | | 107 | if (swiotlb_nr_tbl()) { |
53 | unsigned int max_segment; | | 108 | unsigned int max_segment; |
54 | | | 109 | |
55 | max_segment = swiotlb_max_segment(); | | 110 | max_segment = swiotlb_max_segment(); |
| @@ -122,33 +177,50 @@ create_st: | | | @@ -122,33 +177,50 @@ create_st: |
122 | goto err; | | 177 | goto err; |
123 | } | | 178 | } |
124 | | | 179 | |
125 | __i915_gem_object_set_pages(obj, st, sg_page_sizes); | | 180 | __i915_gem_object_set_pages(obj, st, sg_page_sizes); |
126 | | | 181 | |
127 | return 0; | | 182 | return 0; |
128 | | | 183 | |
129 | err: | | 184 | err: |
130 | sg_set_page(sg, NULL, 0, 0); | | 185 | sg_set_page(sg, NULL, 0, 0); |
131 | sg_mark_end(sg); | | 186 | sg_mark_end(sg); |
132 | internal_free_pages(st); | | 187 | internal_free_pages(st); |
133 | | | 188 | |
134 | return -ENOMEM; | | 189 | return -ENOMEM; |
| | | 190 | #endif |
135 | } | | 191 | } |
136 | | | 192 | |
| | | 193 | #ifdef __NetBSD__ |
| | | 194 | static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj, |
| | | 195 | bus_dmamap_t pages) |
| | | 196 | #else |
137 | static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj, | | 197 | static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj, |
138 | struct sg_table *pages) | | 198 | struct sg_table *pages) |
| | | 199 | #endif |
139 | { | | 200 | { |
140 | i915_gem_gtt_finish_pages(obj, pages); | | 201 | i915_gem_gtt_finish_pages(obj, pages); |
| | | 202 | #ifdef __NetBSD__ |
| | | 203 | bus_dma_tag_t dmat = obj->base.dev->dmat; |
| | | 204 | bus_dmamap_unload(dmat, pages); |
| | | 205 | bus_dmamap_destroy(dmat, pages); |
| | | 206 | bus_dmamem_free(dmat, obj->mm.segs, obj->mm.rsegs); |
| | | 207 | obj->mm.rsegs = 0; |
| | | 208 | kmem_free(obj->mm.segs, obj->mm.nsegs * sizeof(obj->mm.segs[0])); |
| | | 209 | obj->mm.segs = NULL; |
| | | 210 | obj->mm.nsegs = 0; |
| | | 211 | #else |
141 | internal_free_pages(pages); | | 212 | internal_free_pages(pages); |
| | | 213 | #endif |
142 | | | 214 | |
143 | obj->mm.dirty = false; | | 215 | obj->mm.dirty = false; |
144 | } | | 216 | } |
145 | | | 217 | |
146 | static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = { | | 218 | static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = { |
147 | .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | | | 219 | .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | |
148 | I915_GEM_OBJECT_IS_SHRINKABLE, | | 220 | I915_GEM_OBJECT_IS_SHRINKABLE, |
149 | .get_pages = i915_gem_object_get_pages_internal, | | 221 | .get_pages = i915_gem_object_get_pages_internal, |
150 | .put_pages = i915_gem_object_put_pages_internal, | | 222 | .put_pages = i915_gem_object_put_pages_internal, |
151 | }; | | 223 | }; |
152 | | | 224 | |
153 | /** | | 225 | /** |
154 | * i915_gem_object_create_internal: create an object with volatile pages | | 226 | * i915_gem_object_create_internal: create an object with volatile pages |