@@ -1,1041 +1,1040 @@
 /**
  * \file drm_bufs.c
  * Generic buffer template
  *
  * \author Rickard E. (Rik) Faith <faith@valinux.com>
  * \author Gareth Hughes <gareth@valinux.com>
  */
 
 /*
  * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
  *
  * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
  * to deal in the Software without restriction, including without limitation
  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  * and/or sell copies of the Software, and to permit persons to whom the
  * Software is furnished to do so, subject to the following conditions:
  *
  * The above copyright notice and this permission notice (including the next
  * paragraph) shall be included in all copies or substantial portions of the
  * Software.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 
 #include <linux/vmalloc.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
 #include <linux/log2.h>
 #include <linux/export.h>
 #include <linux/mm.h>
-#include <asm/mtrr.h>
 #include <asm/shmparam.h>
 #include <drm/drmP.h>
 
 static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
 						  struct drm_local_map *map)
 {
 	struct drm_map_list *entry;
 	list_for_each_entry(entry, &dev->maplist, head) {
 		/*
 		 * Because the kernel-userspace ABI is fixed at a 32-bit offset
 		 * while PCI resources may live above that, we only compare the
 		 * lower 32 bits of the map offset for maps of type
 		 * _DRM_FRAMEBUFFER or _DRM_REGISTERS.
 		 * It is assumed that if a driver has more than one resource
 		 * of each type, the lower 32 bits are different.
 		 */
 		if (!entry->map ||
 		    map->type != entry->map->type ||
 		    entry->master != dev->primary->master)
 			continue;
 		switch (map->type) {
 		case _DRM_SHM:
 			if (map->flags != _DRM_CONTAINS_LOCK)
 				break;
 			return entry;
 		case _DRM_REGISTERS:
 		case _DRM_FRAME_BUFFER:
 			if ((entry->map->offset & 0xffffffff) ==
 			    (map->offset & 0xffffffff))
 				return entry;
 		default: /* Make gcc happy */
 			;
 		}
 		if (entry->map->offset == map->offset)
 			return entry;
 	}
 
 	return NULL;
 }
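
A hedged illustration of why only the low 32 bits can be compared here; the BAR addresses below are made up, not from any real device:

	/* Two hypothetical same-type PCI BARs exactly 4 GiB apart: */
	resource_size_t bar_a = 0x1fd000000ULL;
	resource_size_t bar_b = 0x2fd000000ULL;	/* bar_a + 4 GiB */
	/* (bar_a & 0xffffffff) == (bar_b & 0xffffffff), so the lookup above
	 * would report a match; hence the stated assumption that drivers keep
	 * the low 32 bits of same-type resources distinct. */
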
 
 static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
 			  unsigned long user_token, int hashed_handle, int shm)
 {
 	int use_hashed_handle, shift;
 	unsigned long add;
 
 	use_hashed_handle = (user_token &~ 0xffffffffUL) || hashed_handle;
 	if (!use_hashed_handle) {
 		int ret;
 		hash->key = user_token >> PAGE_SHIFT;
 		ret = drm_ht_insert_item(&dev->map_hash, hash);
 		if (ret != -EINVAL)
 			return ret;
 	}
 
 	shift = 0;
 	add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
 	if (shm && (SHMLBA > PAGE_SIZE)) {
 		int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;
 
 		/* For shared memory, we have to preserve the SHMLBA
 		 * bits of the eventual vma->vm_pgoff value during
 		 * mmap(). Otherwise we run into cache aliasing problems
 		 * on some platforms. On these platforms, the pgoff of
 		 * a mmap() request is used to pick a suitable virtual
 		 * address for the mmap() region such that it will not
 		 * cause cache aliasing problems.
 		 *
 		 * Therefore, make sure the SHMLBA relevant bits of the
 		 * hash value we use are equal to those in the original
 		 * kernel virtual address.
 		 */
 		shift = bits;
 		add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
 	}
 
 	return drm_ht_just_insert_please(&dev->map_hash, hash,
 					 user_token, 32 - PAGE_SHIFT - 3,
 					 shift, add);
 }
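
To make the bit preservation concrete, a minimal worked sketch assuming a platform with SHMLBA = 16 KiB and 4 KiB pages (PAGE_SHIFT = 12); the token value is hypothetical:

	int bits = ilog2((16 * 1024) >> 12) + 1;	/* ilog2(4) + 1 = 3 */
	unsigned long user_token = 0xffffd000UL;	/* made-up kernel address */
	unsigned long low = (user_token >> 12) & ((1UL << bits) - 1UL);	/* = 5 */
	/* low is OR'd into `add`, so whichever slot drm_ht_just_insert_please()
	 * settles on, the resulting user_token (and thus vma->vm_pgoff at
	 * mmap() time) keeps the same three SHMLBA-relevant bits as the
	 * original kernel virtual address, avoiding cache aliasing. */
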
 
 /**
  * Core function to create a range of memory available for mapping by a
  * non-root process.
  *
  * Adjusts the memory offset to its absolute value according to the mapping
  * type. Adds the map to the map list drm_device::maplist. Adds MTRR's where
  * applicable and if supported by the kernel.
  */
 static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
 			   unsigned int size, enum drm_map_type type,
 			   enum drm_map_flags flags,
 			   struct drm_map_list ** maplist)
 {
 	struct drm_local_map *map;
 	struct drm_map_list *list;
 	drm_dma_handle_t *dmah;
 	unsigned long user_token;
 	int ret;
 
 	map = kmalloc(sizeof(*map), GFP_KERNEL);
 	if (!map)
 		return -ENOMEM;
 
 	map->offset = offset;
 	map->size = size;
 	map->flags = flags;
 	map->type = type;
 
 	/* Only allow shared memory to be removable since we only keep enough
 	 * bookkeeping information about shared memory to allow for removal
 	 * when processes fork.
 	 */
 	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
 		kfree(map);
 		return -EINVAL;
 	}
 	DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
 		  (unsigned long long)map->offset, map->size, map->type);
 
 	/* page-align _DRM_SHM maps. They are allocated here so there is no security
 	 * hole created by that and it works around various broken drivers that use
 	 * a non-aligned quantity to map the SAREA. --BenH
 	 */
 	if (map->type == _DRM_SHM)
 		map->size = PAGE_ALIGN(map->size);
 
 	if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
 		kfree(map);
 		return -EINVAL;
 	}
 	map->mtrr = -1;
 	map->handle = NULL;
 
 	switch (map->type) {
 	case _DRM_REGISTERS:
 	case _DRM_FRAME_BUFFER:
 #if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
 		if (map->offset + (map->size-1) < map->offset ||
 		    map->offset < virt_to_phys(high_memory)) {
 			kfree(map);
 			return -EINVAL;
 		}
 #endif
 		/* Some drivers preinitialize some maps, without the X Server
 		 * needing to be aware of it. Therefore, we just return success
 		 * when the server tries to create a duplicate map.
 		 */
 		list = drm_find_matching_map(dev, map);
 		if (list != NULL) {
 			if (list->map->size != map->size) {
 				DRM_DEBUG("Matching maps of type %d with "
 					  "mismatched sizes, (%ld vs %ld)\n",
 					  map->type, map->size,
 					  list->map->size);
 				list->map->size = map->size;
 			}
 
 			kfree(map);
 			*maplist = list;
 			return 0;
 		}
 
 		if (drm_core_has_MTRR(dev)) {
 			if (map->type == _DRM_FRAME_BUFFER ||
 			    (map->flags & _DRM_WRITE_COMBINING)) {
 				map->mtrr = mtrr_add(map->offset, map->size,
 						     MTRR_TYPE_WRCOMB, 1);
 			}
 		}
 		if (map->type == _DRM_REGISTERS) {
 #ifdef __NetBSD__
 			map->handle = drm_ioremap(dev, map);
 #else
 			map->handle = ioremap(map->offset, map->size);
 #endif
 			if (!map->handle) {
 				kfree(map);
 				return -ENOMEM;
 			}
 		}
 
 		break;
 	case _DRM_SHM:
 		list = drm_find_matching_map(dev, map);
 		if (list != NULL) {
 			if(list->map->size != map->size) {
 				DRM_DEBUG("Matching maps of type %d with "
 					  "mismatched sizes, (%ld vs %ld)\n",
 					  map->type, map->size, list->map->size);
 				list->map->size = map->size;
 			}
 
 			kfree(map);
 			*maplist = list;
 			return 0;
 		}
 		map->handle = vmalloc_user(map->size);
 		DRM_DEBUG("%lu %d %p\n",
 			  map->size, drm_order(map->size), map->handle);
 		if (!map->handle) {
 			kfree(map);
 			return -ENOMEM;
 		}
 		map->offset = (unsigned long)map->handle;
 		if (map->flags & _DRM_CONTAINS_LOCK) {
 			/* Prevent a 2nd X Server from creating a 2nd lock */
 			if (dev->primary->master->lock.hw_lock != NULL) {
 				vfree(map->handle);
 				kfree(map);
 				return -EBUSY;
 			}
 			dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle; /* Pointer to lock */
 		}
 		break;
 	case _DRM_AGP: {
 		struct drm_agp_mem *entry;
 		int valid = 0;
 
 		if (!drm_core_has_AGP(dev)) {
 			kfree(map);
 			return -EINVAL;
 		}
 #ifdef __alpha__
 		map->offset += dev->hose->mem_space->start;
 #endif
 		/* In some cases (i810 driver), user space may have already
 		 * added the AGP base itself, because dev->agp->base previously
 		 * only got set during AGP enable. So, only add the base
 		 * address if the map's offset isn't already within the
 		 * aperture.
 		 */
 #ifdef __NetBSD__
 		if (map->offset < dev->agp->base ||
 		    map->offset > dev->agp->base +
 		    dev->agp->agp_info.ai_aperture_size - 1) {
 			map->offset += dev->agp->base;
 		}
 #else
 		if (map->offset < dev->agp->base ||
 		    map->offset > dev->agp->base +
 		    dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
 			map->offset += dev->agp->base;
 		}
 #endif
 		map->mtrr = dev->agp->agp_mtrr; /* for getmap */
 
 		/* This assumes the DRM is in total control of AGP space.
 		 * It's not always the case as AGP can be in the control
 		 * of user space (i.e. i810 driver). So this loop will get
 		 * skipped and we double check that dev->agp->memory is
 		 * actually set as well as being invalid before EPERM'ing
 		 */
 		list_for_each_entry(entry, &dev->agp->memory, head) {
 			if ((map->offset >= entry->bound) &&
 			    (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
 				valid = 1;
 				break;
 			}
 		}
 		if (!list_empty(&dev->agp->memory) && !valid) {
 			kfree(map);
 			return -EPERM;
 		}
 		DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
 			  (unsigned long long)map->offset, map->size);
 
 		break;
 	}
 	case _DRM_GEM:
 		DRM_ERROR("tried to addmap GEM object\n");
 		break;
 	case _DRM_SCATTER_GATHER:
 		if (!dev->sg) {
 			kfree(map);
 			return -EINVAL;
 		}
 		map->offset += (unsigned long)dev->sg->virtual;
 		break;
 	case _DRM_CONSISTENT:
 		/* dma_addr_t is 64-bit on i386 with CONFIG_HIGHMEM64G.
 		 * As we're limiting the address to 2^32-1 (or less),
 		 * casting it down to 32 bits is no problem, but we
 		 * need to point to a 64-bit variable first. */
 		dmah = drm_pci_alloc(dev, map->size, map->size);
 		if (!dmah) {
 			kfree(map);
 			return -ENOMEM;
 		}
 		map->handle = dmah->vaddr;
 		map->offset = (unsigned long)dmah->busaddr;
 #ifdef __NetBSD__
 		map->lm_data.dmah = dmah;
 #else
 		kfree(dmah);
 #endif
 		break;
 	default:
 		kfree(map);
 		return -EINVAL;
 	}
 
 	list = kzalloc(sizeof(*list), GFP_KERNEL);
 	if (!list) {
 		if (map->type == _DRM_REGISTERS)
 #ifdef __NetBSD__
 			drm_iounmap(dev, map);
 #else
 			iounmap(map->handle);
 #endif
 		kfree(map);
 		return -EINVAL;
 	}
 	list->map = map;
 
 	mutex_lock(&dev->struct_mutex);
 	list_add(&list->head, &dev->maplist);
 
 	/* Assign a 32-bit handle */
 	/* We do it here so that dev->struct_mutex protects the increment */
 	user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
 		map->offset;
 	ret = drm_map_handle(dev, &list->hash, user_token, 0,
 			     (map->type == _DRM_SHM));
 	if (ret) {
 		if (map->type == _DRM_REGISTERS)
 #ifdef __NetBSD__		/* XXX What about other map types...? */
 			drm_iounmap(dev, map);
 #else
 			iounmap(map->handle);
 #endif
 		kfree(map);
 		kfree(list);
 		mutex_unlock(&dev->struct_mutex);
 		return ret;
 	}
 
 	list->user_token = list->hash.key << PAGE_SHIFT;
 	mutex_unlock(&dev->struct_mutex);
 
 	if (!(map->flags & _DRM_DRIVER))
 		list->master = dev->primary->master;
 	*maplist = list;
 	return 0;
 }
 
 int drm_addmap(struct drm_device * dev, resource_size_t offset,
 	       unsigned int size, enum drm_map_type type,
 	       enum drm_map_flags flags, struct drm_local_map ** map_ptr)
 {
 	struct drm_map_list *list;
 	int rc;
 
 	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
 	if (!rc)
 		*map_ptr = list->map;
 	return rc;
 }
 
 EXPORT_SYMBOL(drm_addmap);
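
For reference, a hedged sketch of how a Linux-side driver might call the exported helper at load time; example_load(), the BAR index, and the pci_resource_*() plumbing are assumptions for illustration, not part of this file:

	/* Hypothetical driver load hook: map BAR 0 as MMIO registers. */
	static int example_load(struct drm_device *dev)
	{
		struct drm_local_map *regs;
		int ret;

		ret = drm_addmap(dev,
				 pci_resource_start(dev->pdev, 0),	/* bus address */
				 pci_resource_len(dev->pdev, 0),	/* size */
				 _DRM_REGISTERS, _DRM_DRIVER, &regs);
		if (ret)
			return ret;
		/* regs->handle now holds the ioremap()ed kernel virtual address. */
		return 0;
	}

The _DRM_DRIVER flag keeps the map off the master's list, matching the "drivers preinitialize some maps" case handled above.
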
 
 /**
  * Ioctl to specify a range of memory that is available for mapping by a
  * non-root process.
  *
  * \param inode device inode.
  * \param file_priv DRM file private.
  * \param cmd command.
  * \param arg pointer to a drm_map structure.
  * \return zero on success or a negative value on error.
  *
  */
 int drm_addmap_ioctl(struct drm_device *dev, void *data,
 		     struct drm_file *file_priv)
 {
 	struct drm_map *map = data;
 	struct drm_map_list *maplist;
 	int err;
 
 #ifdef __NetBSD__
 # if 0 /* XXX Old drm did this. */
 	if (!(dev->flags & (FREAD | FWRITE)))
 		return -EACCES;
 # endif
 	if (!(DRM_SUSER() || map->type == _DRM_AGP || map->type == _DRM_SHM))
 		return -EACCES; /* XXX */
 #else
 	if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
 		return -EPERM;
 #endif
 
 	err = drm_addmap_core(dev, map->offset, map->size, map->type,
 			      map->flags, &maplist);
 
 	if (err)
 		return err;
 
 	/* Avoid a warning on 64-bit; this cast isn't nice, but the API was
 	 * set long ago, so it's too late to change. */
 	map->handle = (void *)(unsigned long)maplist->user_token;
 	return 0;
 }
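
For context, a hedged user-space sketch of driving this ioctl directly (libdrm's drmAddMap() wraps it the same way); map_registers() and its argument values are hypothetical:

	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <drm/drm.h>

	/* Map a register BAR through the legacy interface. */
	static void *map_registers(int fd, unsigned long base, size_t size)
	{
		struct drm_map map = { .offset = base, .size = size,
				       .type = _DRM_REGISTERS, .flags = 0 };

		if (ioctl(fd, DRM_IOCTL_ADD_MAP, &map) != 0)
			return NULL;
		/* map.handle carries the 32-bit user token from drm_map_handle(). */
		return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
			    fd, (off_t)(unsigned long)map.handle);
	}
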
 
 /**
  * Remove a map private from list and deallocate resources if the mapping
  * isn't in use.
  *
  * Searches for the map on drm_device::maplist, removes it from the list, sees
  * if it's being used, and frees any associated resources (such as MTRRs) if
  * it's not in use.
  *
  * \sa drm_addmap
  */
 int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
 {
 	struct drm_map_list *r_list = NULL, *list_t;
 #ifndef __NetBSD__
 	drm_dma_handle_t dmah;
 #endif
 	int found = 0;
 	struct drm_master *master;
 
 	/* Find the list entry for the map and remove it */
 	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
 		if (r_list->map == map) {
 			master = r_list->master;
 			list_del(&r_list->head);
 			drm_ht_remove_key(&dev->map_hash,
 					  r_list->user_token >> PAGE_SHIFT);
 			kfree(r_list);
 			found = 1;
 			break;
 		}
 	}
 
 	if (!found)
 		return -EINVAL;
 
 	switch (map->type) {
 	case _DRM_REGISTERS:
 #ifdef __NetBSD__
 		drm_iounmap(dev, map);
 #else
 		iounmap(map->handle);
 #endif
 		/* FALLTHROUGH */
 	case _DRM_FRAME_BUFFER:
 		if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
 			int retcode;
 			retcode = mtrr_del(map->mtrr, map->offset, map->size);
 			DRM_DEBUG("mtrr_del=%d\n", retcode);
 		}
 		break;
 	case _DRM_SHM:
 		vfree(map->handle);
 		if (master) {
 			if (dev->sigdata.lock == master->lock.hw_lock)
 				dev->sigdata.lock = NULL;
 			master->lock.hw_lock = NULL; /* SHM removed */
 			master->lock.file_priv = NULL;
 #ifdef __NetBSD__
 			DRM_WAKEUP_ALL(&master->lock.lock_queue,
 			    &drm_global_mutex);
 #else
 			wake_up_interruptible_all(&master->lock.lock_queue);
 #endif
 		}
 		break;
 	case _DRM_AGP:
 	case _DRM_SCATTER_GATHER:
 		break;
 	case _DRM_CONSISTENT:
 #ifdef __NetBSD__
 		drm_pci_free(dev, map->lm_data.dmah);
 #else
 		dmah.vaddr = map->handle;
 		dmah.busaddr = map->offset;
 		dmah.size = map->size;
 		__drm_pci_free(dev, &dmah);
 #endif
 		break;
 	case _DRM_GEM:
 		DRM_ERROR("tried to rmmap GEM object\n");
 		break;
 	}
 	kfree(map);
 
 	return 0;
 }
 EXPORT_SYMBOL(drm_rmmap_locked);
 
 int drm_rmmap(struct drm_device *dev, struct drm_local_map *map)
 {
 	int ret;
 
 	mutex_lock(&dev->struct_mutex);
 	ret = drm_rmmap_locked(dev, map);
 	mutex_unlock(&dev->struct_mutex);
 
 	return ret;
 }
 EXPORT_SYMBOL(drm_rmmap);
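
A matching teardown sketch for the hypothetical example_load() above; few drivers need this explicitly, since all maps are torn down on last close anyway:

	/* Hypothetical unload hook: drop the register map from example_load(). */
	static void example_unload(struct drm_device *dev, struct drm_local_map *regs)
	{
		drm_rmmap(dev, regs);	/* takes and releases dev->struct_mutex itself */
	}
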
 
 /* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
  * the last close of the device, and this is necessary for cleanup when things
  * exit uncleanly. Therefore, having userland manually remove mappings seems
  * like a pointless exercise since they're going away anyway.
  *
  * One use case might be after addmap is allowed for normal users for SHM and
  * gets used by drivers that the server doesn't need to care about. This seems
  * unlikely.
  *
  * \param inode device inode.
  * \param file_priv DRM file private.
  * \param cmd command.
  * \param arg pointer to a struct drm_map structure.
  * \return zero on success or a negative value on error.
  */
 int drm_rmmap_ioctl(struct drm_device *dev, void *data,
 		    struct drm_file *file_priv)
 {
 	struct drm_map *request = data;
 	struct drm_local_map *map = NULL;
 	struct drm_map_list *r_list;
 	int ret;
 
 	mutex_lock(&dev->struct_mutex);
 	list_for_each_entry(r_list, &dev->maplist, head) {
 		if (r_list->map &&
 		    r_list->user_token == (unsigned long)request->handle &&
 		    r_list->map->flags & _DRM_REMOVABLE) {
 			map = r_list->map;
 			break;
 		}
 	}
 
 	/* List has wrapped around to the head pointer, or it's empty and we
 	 * didn't find anything.
 	 */
 	if (list_empty(&dev->maplist) || !map) {
 		mutex_unlock(&dev->struct_mutex);
 		return -EINVAL;
 	}
 
 	/* Register and framebuffer maps are permanent */
 	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
 		mutex_unlock(&dev->struct_mutex);
 		return 0;
 	}
 
 	ret = drm_rmmap_locked(dev, map);
 
 	mutex_unlock(&dev->struct_mutex);
 
 	return ret;
 }
 
 /**
  * Cleanup after an error on one of the addbufs() functions.
  *
  * \param dev DRM device.
  * \param entry buffer entry where the error occurred.
  *
  * Frees any pages and buffers associated with the given entry.
  */
 static void drm_cleanup_buf_error(struct drm_device * dev,
 				  struct drm_buf_entry * entry)
 {
 	int i;
 
 	if (entry->seg_count) {
 		for (i = 0; i < entry->seg_count; i++) {
 			if (entry->seglist[i]) {
 				drm_pci_free(dev, entry->seglist[i]);
 			}
 		}
 		kfree(entry->seglist);
 
 		entry->seg_count = 0;
 	}
 
 	if (entry->buf_count) {
 		for (i = 0; i < entry->buf_count; i++) {
 			kfree(entry->buflist[i].dev_private);
 		}
 		kfree(entry->buflist);
 
 		entry->buf_count = 0;
 	}
 }
 
 #if __OS_HAS_AGP
 /**
  * Add AGP buffers for DMA transfers.
  *
  * \param dev struct drm_device to which the buffers are to be added.
  * \param request pointer to a struct drm_buf_desc describing the request.
  * \return zero on success or a negative number on failure.
  *
  * After some sanity checks creates a drm_buf structure for each buffer and
  * reallocates the buffer list of the same size order to accommodate the new
  * buffers.
  */
 int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
 {
 	struct drm_device_dma *dma = dev->dma;
 	struct drm_buf_entry *entry;
 	struct drm_agp_mem *agp_entry;
 	struct drm_buf *buf;
 	unsigned long offset;
 	unsigned long agp_offset;
 	int count;
 	int order;
 	int size;
 	int alignment;
 	int page_order;
 	int total;
 	int byte_count;
 	int i, valid;
 	struct drm_buf **temp_buflist;
 
 	if (!dma)
 		return -EINVAL;
 
 	count = request->count;
 	order = drm_order(request->size);
 	size = 1 << order;
 
 	alignment = (request->flags & _DRM_PAGE_ALIGN)
 	    ? PAGE_ALIGN(size) : size;
 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
 	total = PAGE_SIZE << page_order;
 
 	byte_count = 0;
 	agp_offset = dev->agp->base + request->agp_start;
 
 	DRM_DEBUG("count: %d\n", count);
 	DRM_DEBUG("order: %d\n", order);
 	DRM_DEBUG("size: %d\n", size);
 	DRM_DEBUG("agp_offset: %lx\n", agp_offset);
 	DRM_DEBUG("alignment: %d\n", alignment);
 	DRM_DEBUG("page_order: %d\n", page_order);
 	DRM_DEBUG("total: %d\n", total);
 
 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
 		return -EINVAL;
 
 	/* Make sure buffers are located in AGP memory that we own */
 	valid = 0;
 	list_for_each_entry(agp_entry, &dev->agp->memory, head) {
 		if ((agp_offset >= agp_entry->bound) &&
 		    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
 			valid = 1;
 			break;
 		}
 	}
 	if (!list_empty(&dev->agp->memory) && !valid) {
 		DRM_DEBUG("zone invalid\n");
 		return -EINVAL;
 	}
 	spin_lock(&dev->count_lock);
 	if (dev->buf_use) {
 		spin_unlock(&dev->count_lock);
 		return -EBUSY;
 	}
 	atomic_inc(&dev->buf_alloc);
 	spin_unlock(&dev->count_lock);
 
 	mutex_lock(&dev->struct_mutex);
 	entry = &dma->bufs[order];
 	if (entry->buf_count) {
 		mutex_unlock(&dev->struct_mutex);
 		atomic_dec(&dev->buf_alloc);
 		return -ENOMEM; /* May only call once for each order */
 	}
 
 	if (count < 0 || count > 4096) {
 		mutex_unlock(&dev->struct_mutex);
 		atomic_dec(&dev->buf_alloc);
 		return -EINVAL;
 	}
 
 	entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
 	if (!entry->buflist) {
 		mutex_unlock(&dev->struct_mutex);
 		atomic_dec(&dev->buf_alloc);
 		return -ENOMEM;
 	}
 
 	entry->buf_size = size;
 	entry->page_order = page_order;
 
 	offset = 0;
 
 	while (entry->buf_count < count) {
 		buf = &entry->buflist[entry->buf_count];
 		buf->idx = dma->buf_count + entry->buf_count;
 		buf->total = alignment;
 		buf->order = order;
 		buf->used = 0;
 
 		buf->offset = (dma->byte_count + offset);
 		buf->bus_address = agp_offset + offset;
 		buf->address = (void *)(agp_offset + offset);
 		buf->next = NULL;
 		buf->waiting = 0;
 		buf->pending = 0;
 		buf->file_priv = NULL;
 
 		buf->dev_priv_size = dev->driver->dev_priv_size;
 		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
 		if (!buf->dev_private) {
 			/* Set count correctly so we free the proper amount. */
 			entry->buf_count = count;
 			drm_cleanup_buf_error(dev, entry);
 			mutex_unlock(&dev->struct_mutex);
 			atomic_dec(&dev->buf_alloc);
 			return -ENOMEM;
 		}
 
 		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
 
 		offset += alignment;
 		entry->buf_count++;
 		byte_count += PAGE_SIZE << page_order;
 	}
 
 	DRM_DEBUG("byte_count: %d\n", byte_count);
 
 	temp_buflist = krealloc(dma->buflist,
 				(dma->buf_count + entry->buf_count) *
 				sizeof(*dma->buflist), GFP_KERNEL);
 	if (!temp_buflist) {
 		/* Free the entry because it isn't valid */
 		drm_cleanup_buf_error(dev, entry);
 		mutex_unlock(&dev->struct_mutex);
 		atomic_dec(&dev->buf_alloc);
 		return -ENOMEM;
 	}
 	dma->buflist = temp_buflist;
 
 	for (i = 0; i < entry->buf_count; i++) {
 		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
 	}
 
 	dma->buf_count += entry->buf_count;
 	dma->seg_count += entry->seg_count;
 	dma->page_count += byte_count >> PAGE_SHIFT;
 	dma->byte_count += byte_count;
 
 	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
 	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
 
 	mutex_unlock(&dev->struct_mutex);
 
 	request->count = entry->buf_count;
 	request->size = size;
 
 	dma->flags = _DRM_DMA_USE_AGP;
 
 	atomic_dec(&dev->buf_alloc);
 	return 0;
 }
 EXPORT_SYMBOL(drm_addbufs_agp);
 #endif /* __OS_HAS_AGP */
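
To make the sizing arithmetic concrete, a hedged sketch of a driver-side request; the buffer count and the aperture offset are illustrative, not from any real driver:

	/* Hypothetical request: 32 buffers of 64 KiB at the start of our AGP zone. */
	struct drm_buf_desc req = {
		.count     = 32,
		.size      = 65536,	/* drm_order(65536) = 16, so size stays 64 KiB */
		.flags     = _DRM_PAGE_ALIGN | _DRM_AGP_BUFFER,
		.agp_start = 0,		/* byte offset from dev->agp->base */
	};
	int ret = drm_addbufs_agp(dev, &req);
	/* With 4 KiB pages: page_order = 16 - 12 = 4, total = PAGE_SIZE << 4 = 64 KiB,
	 * so the ownership check above needs 32 * 64 KiB = 2 MiB of bound AGP memory. */
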
int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
{
	struct drm_device_dma *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	struct drm_buf_entry *entry;
	drm_dma_handle_t *dmah;
	struct drm_buf *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

#ifdef __NetBSD__
	if (!DRM_SUSER())
		return -EACCES;	/* XXX */
#else
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
#endif

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;
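	/* drm_order() rounds the request up to the next power of two,
	 * so e.g. a 3000-byte request yields order = 12, size = 4096. */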

	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
		  request->count, request->size, size, order);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

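	/* Work out the segment geometry: each drm_pci_alloc() below
	 * returns one segment of 'total' bytes, and buffers are packed
	 * into it at 'alignment'-byte strides. */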
	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

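	/* buf_use set means the buffers are already mapped, so no more
	 * may be added; holding buf_alloc non-zero in turn blocks
	 * mapping until this allocation has finished. */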
	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

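	/* seglist records the drm_pci_alloc() handles backing this
	 * order so the segments can be freed again on teardown. */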
	entry->seglist = kzalloc(count * sizeof(*entry->seglist), GFP_KERNEL);
	if (!entry->seglist) {
		kfree(entry->buflist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded
	 */
	temp_pagelist = kmalloc((dma->page_count + (count << page_order)) *
			       sizeof(*dma->pagelist), GFP_KERNEL);
	if (!temp_pagelist) {
		kfree(entry->buflist);
		kfree(entry->seglist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memcpy(temp_pagelist,
	       dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
	DRM_DEBUG("pagelist: %d entries\n",
		  dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

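	/* Allocate segments until 'count' buffers exist: each pass
	 * grabs one PCI-consistent segment, records its pages in the
	 * temporary pagelist, then slices the segment into buffers. */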
	while (entry->buf_count < count) {

		dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);

		if (!dmah) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			kfree(temp_pagelist);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		entry->seglist[entry->seg_count++] = dmah;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ 0x%08lx\n",
				  dma->page_count + page_count,
				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++]
			    = (unsigned long)dmah->vaddr + PAGE_SIZE * i;
		}
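		/* Carve the fresh segment into as many aligned buffers
		 * as still fit below 'count'. */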
		for (offset = 0;
		     offset + size <= total && entry->buf_count < count;
		     offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
#ifdef __NetBSD__
			buf->address = (void *)((char *)dmah->vaddr + offset);
#else
			buf->address = (void *)(dmah->vaddr + offset);
#endif
			buf->bus_address = dmah->busaddr + offset;
			buf->next = NULL;
			buf->waiting = 0;
			buf->pending = 0;
			buf->file_priv = NULL;

			buf->dev_priv_size = dev->driver->dev_priv_size;
			buf->dev_private = kzalloc(buf->dev_priv_size,
						   GFP_KERNEL);
			if (!buf->dev_private) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				kfree(temp_pagelist);
				mutex_unlock(&dev->struct_mutex);
				atomic_dec(&dev->buf_alloc);
				return -ENOMEM;
			}

			DRM_DEBUG("buffer %d @ %p\n",
				  entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}

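	/* All segments allocated; grow the device-wide buffer list so
	 * the new buffers can be appended to it. */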
	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		kfree(temp_pagelist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	if (dma->page_count) {
		kfree(dma->pagelist);
	}
	dma->pagelist = temp_pagelist;

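	/* Each segment contributes 1 << page_order pages, i.e.
	 * PAGE_SIZE << page_order bytes, to the device totals. */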
	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	if (request->flags & _DRM_PCI_BUFFER_RO)
		dma->flags = _DRM_DMA_USE_PCI_RO;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_addbufs_pci);

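/**
 * Add buffers that index into an existing scatter/gather area.
 *
 * No memory is allocated for the buffers themselves here; each buffer
 * is laid out within the scatter/gather mapping the driver set up
 * earlier, mirroring the AGP path above.
 */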
static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_SG))
		return -EINVAL;

	if (!dma)