Wed Jul 24 02:52:37 2013 UTC
Destroy the idr and spin lock in drm_gem_destroy.


(riastradh)
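Context for the change: drm_gem_init() sets up dev->object_name_lock and dev->object_name_idr, but drm_gem_destroy() previously tore down only the mmap-offset manager, leaving those two uninitialized-on-detach. A minimal sketch of the setup/teardown pairing this revision establishes (the #ifdef __NetBSD__ guard is from the diff below; the rationale that native Linux spinlocks have no destructor, while NetBSD's Linux-compat shims allocate real lock state, is our reading, not part of the commit):

	/* Sketch: resources created in drm_gem_init() ... */
	spin_lock_init(&dev->object_name_lock);
	idr_init(&dev->object_name_idr);

	/* ... are now released in drm_gem_destroy().  NetBSD-only,
	 * because the compat spin_lock_destroy()/idr_destroy() free
	 * state that Linux's counterparts never allocate. */
#ifdef __NetBSD__
	idr_destroy(&dev->object_name_idr);
	spin_lock_destroy(&dev->object_name_lock);
#endif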
cvs diff -r1.1.1.1.2.4 -r1.1.1.1.2.5 src/sys/external/bsd/drm2/dist/drm/drm_gem.c

--- src/sys/external/bsd/drm2/dist/drm/drm_gem.c 2013/07/24 02:52:06 1.1.1.1.2.4
+++ src/sys/external/bsd/drm2/dist/drm/drm_gem.c 2013/07/24 02:52:37 1.1.1.1.2.5
@@ -1,738 +1,743 @@
 /*
  * Copyright © 2008 Intel Corporation
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
  * to deal in the Software without restriction, including without limitation
  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  * and/or sell copies of the Software, and to permit persons to whom the
  * Software is furnished to do so, subject to the following conditions:
  *
  * The above copyright notice and this permission notice (including the next
  * paragraph) shall be included in all copies or substantial portions of the
  * Software.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  * IN THE SOFTWARE.
  *
  * Authors:
  *    Eric Anholt <eric@anholt.net>
  *
  */
 
 #include <linux/types.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
 #include <linux/uaccess.h>
 #include <linux/fs.h>
 #include <linux/file.h>
 #include <linux/module.h>
 #include <linux/mman.h>
 #include <linux/pagemap.h>
 #include <linux/shmem_fs.h>
 #include <linux/dma-buf.h>
 #include <linux/err.h>
 #include <linux/export.h>
 #include <asm/bug.h>
 #include <drm/drmP.h>
 
 /** @file drm_gem.c
  *
  * This file provides some of the base ioctls and library routines for
  * the graphics memory manager implemented by each device driver.
  *
  * Because various devices have different requirements in terms of
  * synchronization and migration strategies, implementing that is left up to
  * the driver, and all that the general API provides should be generic --
  * allocating objects, reading/writing data with the cpu, freeing objects.
  * Even there, platform-dependent optimizations for reading/writing data with
  * the CPU mean we'll likely hook those out to driver-specific calls. However,
  * the DRI2 implementation wants to have at least allocate/mmap be generic.
  *
  * The goal was to have swap-backed object allocation managed through
  * struct file. However, file descriptors as handles to a struct file have
  * two major failings:
  * - Process limits prevent more than 1024 or so being used at a time by
  *   default.
  * - Inability to allocate high fds will aggravate the X Server's select()
  *   handling, and likely that of many GL client applications as well.
  *
  * This led to a plan of using our own integer IDs (called handles, following
  * DRM terminology) to mimic fds, and implement the fd syscalls we need as
  * ioctls. The objects themselves will still include the struct file so
  * that we can transition to fds if the required kernel infrastructure shows
  * up at a later date, and as our interface with shmfs for memory allocation.
  */
 
 /*
  * We make up offsets for buffer objects so we can recognize them at
  * mmap time.
  */
 
 /* pgoff in mmap is an unsigned long, so we need to make sure that
  * the faked up offset will fit
  */
 
 #if BITS_PER_LONG == 64
 #define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
 #define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
 #else
 #define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
 #define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
 #endif
 
 /**
  * Initialize the GEM device fields
  */
 
 int
 drm_gem_init(struct drm_device *dev)
 {
 	struct drm_gem_mm *mm;
 
 	spin_lock_init(&dev->object_name_lock);
 	idr_init(&dev->object_name_idr);
 
 	mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
 	if (!mm) {
 		DRM_ERROR("out of memory\n");
 		return -ENOMEM;
 	}
 
 	dev->mm_private = mm;
 
 	if (drm_ht_create(&mm->offset_hash, 12)) {
 		kfree(mm);
 		return -ENOMEM;
 	}
 
 	if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
 		    DRM_FILE_PAGE_OFFSET_SIZE)) {
 		drm_ht_remove(&mm->offset_hash);
 		kfree(mm);
 		return -ENOMEM;
 	}
 
 	return 0;
 }
 
 void
 drm_gem_destroy(struct drm_device *dev)
 {
 	struct drm_gem_mm *mm = dev->mm_private;
 
 	drm_mm_takedown(&mm->offset_manager);
 	drm_ht_remove(&mm->offset_hash);
 	kfree(mm);
 	dev->mm_private = NULL;
+
+#ifdef __NetBSD__
+	idr_destroy(&dev->object_name_idr);
+	spin_lock_destroy(&dev->object_name_lock);
+#endif
 }
 
 /**
  * Initialize an already allocated GEM object of the specified size with
  * shmfs backing store.
  */
 int drm_gem_object_init(struct drm_device *dev,
 			struct drm_gem_object *obj, size_t size)
 {
 	BUG_ON((size & (PAGE_SIZE - 1)) != 0);
 
 	obj->dev = dev;
 	obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
 	if (IS_ERR(obj->filp))
 		return PTR_ERR(obj->filp);
 
 	kref_init(&obj->refcount);
 	atomic_set(&obj->handle_count, 0);
 	obj->size = size;
 
 	return 0;
 }
 EXPORT_SYMBOL(drm_gem_object_init);
 
 /**
  * Initialize an already allocated GEM object of the specified size with
  * no GEM provided backing store. Instead the caller is responsible for
  * backing the object and handling it.
  */
 int drm_gem_private_object_init(struct drm_device *dev,
 			struct drm_gem_object *obj, size_t size)
 {
 	BUG_ON((size & (PAGE_SIZE - 1)) != 0);
 
 	obj->dev = dev;
 	obj->filp = NULL;
 
 	kref_init(&obj->refcount);
 	atomic_set(&obj->handle_count, 0);
 	obj->size = size;
 
 	return 0;
 }
 EXPORT_SYMBOL(drm_gem_private_object_init);
 
 /**
  * Allocate a GEM object of the specified size with shmfs backing store
  */
 struct drm_gem_object *
 drm_gem_object_alloc(struct drm_device *dev, size_t size)
 {
 	struct drm_gem_object *obj;
 
 	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 	if (!obj)
 		goto free;
 
 	if (drm_gem_object_init(dev, obj, size) != 0)
 		goto free;
 
 	if (dev->driver->gem_init_object != NULL &&
 	    dev->driver->gem_init_object(obj) != 0) {
 		goto fput;
 	}
 	return obj;
 fput:
 	/* Object_init mangles the global counters - readjust them. */
 	fput(obj->filp);
 free:
 	kfree(obj);
 	return NULL;
 }
 EXPORT_SYMBOL(drm_gem_object_alloc);
 
 #ifndef __NetBSD__ /* XXX drm prime */
 static void
 drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
 {
 	if (obj->import_attach) {
 		drm_prime_remove_imported_buf_handle(&filp->prime,
 				obj->import_attach->dmabuf);
 	}
 	if (obj->export_dma_buf) {
 		drm_prime_remove_imported_buf_handle(&filp->prime,
 				obj->export_dma_buf);
 	}
 }
 #endif
 
 /**
  * Removes the mapping from handle to filp for this object.
  */
 int
 drm_gem_handle_delete(struct drm_file *filp, u32 handle)
 {
 	struct drm_device *dev;
 	struct drm_gem_object *obj;
 
 	/* This is gross. The idr system doesn't let us try a delete and
 	 * return an error code. It just spews if you fail at deleting.
 	 * So, we have to grab a lock around finding the object and then
 	 * doing the delete on it and dropping the refcount, or the user
 	 * could race us to double-decrement the refcount and cause a
 	 * use-after-free later. Given the frequency of our handle lookups,
 	 * we may want to use ida for number allocation and a hash table
 	 * for the pointers, anyway.
 	 */
 	spin_lock(&filp->table_lock);
 
 	/* Check if we currently have a reference on the object */
 	obj = idr_find(&filp->object_idr, handle);
 	if (obj == NULL) {
 		spin_unlock(&filp->table_lock);
 		return -EINVAL;
 	}
 	dev = obj->dev;
 
 	/* Release reference and decrement refcount. */
 	idr_remove(&filp->object_idr, handle);
 	spin_unlock(&filp->table_lock);
 
 #ifndef __NetBSD__
 	drm_gem_remove_prime_handles(obj, filp);
 #endif
 
 	if (dev->driver->gem_close_object)
 		dev->driver->gem_close_object(obj, filp);
 	drm_gem_object_handle_unreference_unlocked(obj);
 
 	return 0;
 }
 EXPORT_SYMBOL(drm_gem_handle_delete);
 
 /**
  * Create a handle for this object. This adds a handle reference
  * to the object, which includes a regular reference count. Callers
  * will likely want to dereference the object afterwards.
  */
 int
 drm_gem_handle_create(struct drm_file *file_priv,
 		       struct drm_gem_object *obj,
 		       u32 *handlep)
 {
 	struct drm_device *dev = obj->dev;
 	int ret;
 
 	/*
 	 * Get the user-visible handle using idr.
 	 */
 again:
 	/* ensure there is space available to allocate a handle */
 	if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
 		return -ENOMEM;
 
 	/* do the allocation under our spinlock */
 	spin_lock(&file_priv->table_lock);
 	ret = idr_get_new_above(&file_priv->object_idr, obj, 1, (int *)handlep);
 	spin_unlock(&file_priv->table_lock);
 	if (ret == -EAGAIN)
 		goto again;
 	else if (ret)
 		return ret;
 
 	drm_gem_object_handle_reference(obj);
 
 	if (dev->driver->gem_open_object) {
 		ret = dev->driver->gem_open_object(obj, file_priv);
 		if (ret) {
 			drm_gem_handle_delete(file_priv, *handlep);
 			return ret;
 		}
 	}
 
 	return 0;
 }
 EXPORT_SYMBOL(drm_gem_handle_create);
 
 
 /**
  * drm_gem_free_mmap_offset - release a fake mmap offset for an object
  * @obj: obj in question
  *
  * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
  */
 void
 drm_gem_free_mmap_offset(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 	struct drm_gem_mm *mm = dev->mm_private;
 	struct drm_map_list *list = &obj->map_list;
 
 	drm_ht_remove_item(&mm->offset_hash, &list->hash);
 	drm_mm_put_block(list->file_offset_node);
 	kfree(list->map);
 	list->map = NULL;
 }
 EXPORT_SYMBOL(drm_gem_free_mmap_offset);
 
 /**
  * drm_gem_create_mmap_offset - create a fake mmap offset for an object
  * @obj: obj in question
  *
  * GEM memory mapping works by handing back to userspace a fake mmap offset
  * it can use in a subsequent mmap(2) call. The DRM core code then looks
  * up the object based on the offset and sets up the various memory mapping
  * structures.
  *
  * This routine allocates and attaches a fake offset for @obj.
  */
 int
 drm_gem_create_mmap_offset(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 	struct drm_gem_mm *mm = dev->mm_private;
 	struct drm_map_list *list;
 	struct drm_local_map *map;
 	int ret;
 
 	/* Set the object up for mmap'ing */
 	list = &obj->map_list;
 	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
 	if (!list->map)
 		return -ENOMEM;
 
 	map = list->map;
 	map->type = _DRM_GEM;
 	map->size = obj->size;
 	map->handle = obj;
 
 	/* Get a DRM GEM mmap offset allocated... */
 	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
 			obj->size / PAGE_SIZE, 0, false);
 
 	if (!list->file_offset_node) {
 		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
 		ret = -ENOSPC;
 		goto out_free_list;
 	}
 
 	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
 			obj->size / PAGE_SIZE, 0);
 	if (!list->file_offset_node) {
 		ret = -ENOMEM;
 		goto out_free_list;
 	}
 
 	list->hash.key = list->file_offset_node->start;
 	ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
 	if (ret) {
 		DRM_ERROR("failed to add to map hash\n");
 		goto out_free_mm;
 	}
 
 	return 0;
 
 out_free_mm:
 	drm_mm_put_block(list->file_offset_node);
 out_free_list:
 	kfree(list->map);
 	list->map = NULL;
 
 	return ret;
 }
 EXPORT_SYMBOL(drm_gem_create_mmap_offset);
 
 /** Returns a reference to the object named by the handle. */
 struct drm_gem_object *
 drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
 		      u32 handle)
 {
 	struct drm_gem_object *obj;
 
 	spin_lock(&filp->table_lock);
 
 	/* Check if we currently have a reference on the object */
 	obj = idr_find(&filp->object_idr, handle);
 	if (obj == NULL) {
 		spin_unlock(&filp->table_lock);
 		return NULL;
 	}
 
 	drm_gem_object_reference(obj);
 
 	spin_unlock(&filp->table_lock);
 
 	return obj;
 }
 EXPORT_SYMBOL(drm_gem_object_lookup);
 
 /**
  * Releases the handle to an mm object.
  */
 int
 drm_gem_close_ioctl(struct drm_device *dev, void *data,
 		    struct drm_file *file_priv)
 {
 	struct drm_gem_close *args = data;
 	int ret;
 
 	if (!(dev->driver->driver_features & DRIVER_GEM))
 		return -ENODEV;
 
 	ret = drm_gem_handle_delete(file_priv, args->handle);
 
 	return ret;
 }
 
 /**
  * Create a global name for an object, returning the name.
  *
  * Note that the name does not hold a reference; when the object
  * is freed, the name goes away.
  */
 int
 drm_gem_flink_ioctl(struct drm_device *dev, void *data,
 		    struct drm_file *file_priv)
 {
 	struct drm_gem_flink *args = data;
 	struct drm_gem_object *obj;
 	int ret;
 
 	if (!(dev->driver->driver_features & DRIVER_GEM))
 		return -ENODEV;
 
 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 	if (obj == NULL)
 		return -ENOENT;
 
 again:
 	if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) {
 		ret = -ENOMEM;
 		goto err;
 	}
 
 	spin_lock(&dev->object_name_lock);
 	if (!obj->name) {
 		ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
 					&obj->name);
 		args->name = (uint64_t) obj->name;
 		spin_unlock(&dev->object_name_lock);
 
 		if (ret == -EAGAIN)
 			goto again;
 		else if (ret)
 			goto err;
 
 		/* Allocate a reference for the name table. */
 		drm_gem_object_reference(obj);
 	} else {
 		args->name = (uint64_t) obj->name;
 		spin_unlock(&dev->object_name_lock);
 		ret = 0;
 	}
 
 err:
 	drm_gem_object_unreference_unlocked(obj);
 	return ret;
 }
 
 /**
  * Open an object using the global name, returning a handle and the size.
  *
  * This handle (of course) holds a reference to the object, so the object
  * will not go away until the handle is deleted.
  */
 int
 drm_gem_open_ioctl(struct drm_device *dev, void *data,
 		   struct drm_file *file_priv)
 {
 	struct drm_gem_open *args = data;
 	struct drm_gem_object *obj;
 	int ret;
 	u32 handle;
 
 	if (!(dev->driver->driver_features & DRIVER_GEM))
 		return -ENODEV;
 
 	spin_lock(&dev->object_name_lock);
 	obj = idr_find(&dev->object_name_idr, (int) args->name);
 	if (obj)
 		drm_gem_object_reference(obj);
 	spin_unlock(&dev->object_name_lock);
 	if (!obj)
 		return -ENOENT;
 
 	ret = drm_gem_handle_create(file_priv, obj, &handle);
 	drm_gem_object_unreference_unlocked(obj);
 	if (ret)
 		return ret;
 
 	args->handle = handle;
 	args->size = obj->size;
 
 	return 0;
 }
 
 /**
  * Called at device open time, sets up the structure for handling refcounting
  * of mm objects.
  */
 void
 drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
 {
 	idr_init(&file_private->object_idr);
 	spin_lock_init(&file_private->table_lock);
 }
 
 /**
  * Called at device close to release the file's
  * handle references on objects.
  */
 static int
 drm_gem_object_release_handle(int id, void *ptr, void *data)
 {
 	struct drm_file *file_priv = data;
 	struct drm_gem_object *obj = ptr;
 	struct drm_device *dev = obj->dev;
 
 #ifndef __NetBSD__ /* XXX drm prime */
 	drm_gem_remove_prime_handles(obj, file_priv);
 #endif
 
 	if (dev->driver->gem_close_object)
 		dev->driver->gem_close_object(obj, file_priv);
 
 	drm_gem_object_handle_unreference_unlocked(obj);
 
 	return 0;
 }
 
 /**
  * Called at close time when the filp is going away.
  *
  * Releases any remaining references on objects by this filp.
  */
 void
 drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
 {
 	idr_for_each(&file_private->object_idr,
 		     &drm_gem_object_release_handle, file_private);
 
 	idr_remove_all(&file_private->object_idr);
 	idr_destroy(&file_private->object_idr);
 }
 
 void
 drm_gem_object_release(struct drm_gem_object *obj)
 {
 	if (obj->filp)
 		fput(obj->filp);
 }
 EXPORT_SYMBOL(drm_gem_object_release);
 
 /**
  * Called after the last reference to the object has been lost.
  * Must be called holding struct_ mutex
  *
  * Frees the object
  */
 void
 drm_gem_object_free(struct kref *kref)
 {
 	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
 	struct drm_device *dev = obj->dev;
 
 	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
 
 	if (dev->driver->gem_free_object != NULL)
 		dev->driver->gem_free_object(obj);
 }
 EXPORT_SYMBOL(drm_gem_object_free);
 
 static void drm_gem_object_ref_bug(struct kref *list_kref)
 {
 	BUG();
 }
 
 /**
  * Called after the last handle to the object has been closed
  *
  * Removes any name for the object. Note that this must be
  * called before drm_gem_object_free or we'll be touching
  * freed memory
  */
 void drm_gem_object_handle_free(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 
 	/* Remove any name for this object */
 	spin_lock(&dev->object_name_lock);
 	if (obj->name) {
 		idr_remove(&dev->object_name_idr, obj->name);
 		obj->name = 0;
 		spin_unlock(&dev->object_name_lock);
 		/*
 		 * The object name held a reference to this object, drop
 		 * that now.
 		 *
 		 * This cannot be the last reference, since the handle holds one too.
 		 */
 		kref_put(&obj->refcount, drm_gem_object_ref_bug);
 	} else
 		spin_unlock(&dev->object_name_lock);
 
 }
 EXPORT_SYMBOL(drm_gem_object_handle_free);
 
 void drm_gem_vm_open(struct vm_area_struct *vma)
 {
 	struct drm_gem_object *obj = vma->vm_private_data;
 
 	drm_gem_object_reference(obj);
 
 	mutex_lock(&obj->dev->struct_mutex);
 	drm_vm_open_locked(obj->dev, vma);
 	mutex_unlock(&obj->dev->struct_mutex);
 }
 EXPORT_SYMBOL(drm_gem_vm_open);
 
 void drm_gem_vm_close(struct vm_area_struct *vma)
 {
 	struct drm_gem_object *obj = vma->vm_private_data;
 	struct drm_device *dev = obj->dev;
 
 	mutex_lock(&dev->struct_mutex);
 	drm_vm_close_locked(obj->dev, vma);
 	drm_gem_object_unreference(obj);
 	mutex_unlock(&dev->struct_mutex);
 }
 EXPORT_SYMBOL(drm_gem_vm_close);
 
 
 /**
  * drm_gem_mmap - memory map routine for GEM objects
  * @filp: DRM file pointer
  * @vma: VMA for the area to be mapped
  *
  * If a driver supports GEM object mapping, mmap calls on the DRM file
  * descriptor will end up here.
  *
  * If we find the object based on the offset passed in (vma->vm_pgoff will
  * contain the fake offset we created when the GTT map ioctl was called on
  * the object), we set up the driver fault handler so that any accesses
  * to the object can be trapped, to perform migration, GTT binding, surface
  * register allocation, or performance monitoring.
  */
 int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 {
 	struct drm_file *priv = filp->private_data;
 	struct drm_device *dev = priv->minor->dev;
 	struct drm_gem_mm *mm = dev->mm_private;
 	struct drm_local_map *map = NULL;
 	struct drm_gem_object *obj;
 	struct drm_hash_item *hash;
 	int ret = 0;
 
 	if (drm_device_is_unplugged(dev))
 		return -ENODEV;
 
 	mutex_lock(&dev->struct_mutex);
 
 	if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
 		mutex_unlock(&dev->struct_mutex);
 		return drm_mmap(filp, vma);
 	}
 
 	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
 	if (!map ||
 	    ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
 		ret = -EPERM;
 		goto out_unlock;
 	}
 
 	/* Check for valid size. */
 	if (map->size < vma->vm_end - vma->vm_start) {
 		ret = -EINVAL;
 		goto out_unlock;
 	}
 
 	obj = map->handle;
 	if (!obj->dev->driver->gem_vm_ops) {
 		ret = -EINVAL;
 		goto out_unlock;
 	}
 
 	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
 	vma->vm_ops = obj->dev->driver->gem_vm_ops;
 	vma->vm_private_data = map->handle;
 	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
 
 	/* Take a ref for this mapping of the object, so that the fault
 	 * handler can dereference the mmap offset's pointer to the object.
 	 * This reference is cleaned up by the corresponding vm_close
 	 * (which should happen whether the vma was created by this call, or
 	 * by a vm_open due to mremap or partial unmap or whatever).
 	 */
 	drm_gem_object_reference(obj);
 
 	drm_vm_open_locked(dev, vma);
 
 out_unlock:
 	mutex_unlock(&dev->struct_mutex);
 
 	return ret;
 }
 EXPORT_SYMBOL(drm_gem_mmap);