linux-2.6.git / drivers/gpu/drm/drm_vm.c
drm: add core support for unplugging a device (v2)
/**
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include <linux/export.h>
#if defined(__ia64__)
#include <linux/efi.h>
#include <linux/slab.h>
#endif

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);

static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
{
        pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__i386__) || defined(__x86_64__)
        if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
                pgprot_val(tmp) |= _PAGE_PCD;
                pgprot_val(tmp) &= ~_PAGE_PWT;
        }
#elif defined(__powerpc__)
        pgprot_val(tmp) |= _PAGE_NO_CACHE;
        if (map_type == _DRM_REGISTERS)
                pgprot_val(tmp) |= _PAGE_GUARDED;
#elif defined(__ia64__)
        if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
                tmp = pgprot_writecombine(tmp);
        else
                tmp = pgprot_noncached(tmp);
#elif defined(__sparc__) || defined(__arm__)
        tmp = pgprot_noncached(tmp);
#endif
        return tmp;
}

static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
{
        pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
        tmp |= _PAGE_NO_CACHE;
#endif
        return tmp;
}

/**
 * \c fault method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information, including the faulting address.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Find the right map and, if it's AGP memory, find the real physical page to
 * map, get the page, increment the use count and store it in \p vmf->page.
 */
#if __OS_HAS_AGP
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_local_map *map = NULL;
        struct drm_map_list *r_list;
        struct drm_hash_item *hash;

        /*
         * Find the right map
         */
        if (!drm_core_has_AGP(dev))
                goto vm_fault_error;

        if (!dev->agp || !dev->agp->cant_use_aperture)
                goto vm_fault_error;

        if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
                goto vm_fault_error;

        r_list = drm_hash_entry(hash, struct drm_map_list, hash);
        map = r_list->map;

        if (map && map->type == _DRM_AGP) {
                /*
                 * Using vm_pgoff as a selector forces us to use this unusual
                 * addressing scheme.
                 */
                resource_size_t offset = (unsigned long)vmf->virtual_address -
                        vma->vm_start;
                resource_size_t baddr = map->offset + offset;
                struct drm_agp_mem *agpmem;
                struct page *page;

#ifdef __alpha__
                /*
                 * Adjust to a bus-relative address
                 */
                baddr -= dev->hose->mem_space->start;
#endif

                /*
                 * It's AGP memory - find the real physical page to map
                 */
                list_for_each_entry(agpmem, &dev->agp->memory, head) {
                        if (agpmem->bound <= baddr &&
                            agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
                                break;
                }

                if (&agpmem->head == &dev->agp->memory)
                        goto vm_fault_error;

                /*
                 * Get the page, inc the use count, and return it
                 */
                offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
                page = agpmem->memory->pages[offset];
                get_page(page);
                vmf->page = page;

                DRM_DEBUG
                    ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
                     (unsigned long long)baddr,
                     agpmem->memory->pages[offset],
                     (unsigned long long)offset,
                     page_count(page));
                return 0;
        }
vm_fault_error:
        return VM_FAULT_SIGBUS; /* Disallow mremap */
}
#else                           /* __OS_HAS_AGP */
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return VM_FAULT_SIGBUS;
}
#endif                          /* __OS_HAS_AGP */

/**
 * \c fault method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information, including the faulting address.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * store it in \p vmf->page.
 */
static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_local_map *map = vma->vm_private_data;
        unsigned long offset;
        unsigned long i;
        struct page *page;

        if (!map)
                return VM_FAULT_SIGBUS; /* Nothing allocated */

        offset = (unsigned long)vmf->virtual_address - vma->vm_start;
        i = (unsigned long)map->handle + offset;
        page = vmalloc_to_page((void *)i);
        if (!page)
                return VM_FAULT_SIGBUS;
        get_page(page);
        vmf->page = page;

        DRM_DEBUG("shm_fault 0x%lx\n", offset);
        return 0;
}

/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last person to close a mapping and
 * it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_vma_entry *pt, *temp;
        struct drm_local_map *map;
        struct drm_map_list *r_list;
        int found_maps = 0;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_dec(&dev->vma_count);

        map = vma->vm_private_data;

        mutex_lock(&dev->struct_mutex);
        list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
                if (pt->vma->vm_private_data == map)
                        found_maps++;
                if (pt->vma == vma) {
                        list_del(&pt->head);
                        kfree(pt);
                }
        }

        /* We were the only map that was found */
        if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
                /* Check to see if we are in the maplist; if we are not,
                 * then we delete this mapping's information.
                 */
                found_maps = 0;
                list_for_each_entry(r_list, &dev->maplist, head) {
                        if (r_list->map == map)
                                found_maps++;
                }

                if (!found_maps) {
                        drm_dma_handle_t dmah;

                        switch (map->type) {
                        case _DRM_REGISTERS:
                        case _DRM_FRAME_BUFFER:
                                if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
                                        int retcode;
                                        retcode = mtrr_del(map->mtrr,
                                                           map->offset,
                                                           map->size);
                                        DRM_DEBUG("mtrr_del = %d\n", retcode);
                                }
                                iounmap(map->handle);
                                break;
                        case _DRM_SHM:
                                vfree(map->handle);
                                break;
                        case _DRM_AGP:
                        case _DRM_SCATTER_GATHER:
                                break;
                        case _DRM_CONSISTENT:
                                dmah.vaddr = map->handle;
                                dmah.busaddr = map->offset;
                                dmah.size = map->size;
                                __drm_pci_free(dev, &dmah);
                                break;
                        case _DRM_GEM:
                                DRM_ERROR("tried to rmmap GEM object\n");
                                break;
                        }
                        kfree(map);
                }
        }
        mutex_unlock(&dev->struct_mutex);
}

/**
 * \c fault method for DMA virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information, including the faulting address.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Determine the page number from the page offset and get it from
 * drm_device_dma::pagelist.
 */
static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_device_dma *dma = dev->dma;
        unsigned long offset;
        unsigned long page_nr;
        struct page *page;

        if (!dma)
                return VM_FAULT_SIGBUS; /* Error */
        if (!dma->pagelist)
                return VM_FAULT_SIGBUS; /* Nothing allocated */

        offset = (unsigned long)vmf->virtual_address - vma->vm_start;   /* vm_[pg]off[set] should be 0 */
        page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
        page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));

        get_page(page);
        vmf->page = page;

        DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
        return 0;
}

/**
 * \c fault method for scatter-gather virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information, including the faulting address.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Determine the map offset from the page offset and get the page from
 * drm_sg_mem::pagelist.
 */
static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_local_map *map = vma->vm_private_data;
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_sg_mem *entry = dev->sg;
        unsigned long offset;
        unsigned long map_offset;
        unsigned long page_offset;
        struct page *page;

        if (!entry)
                return VM_FAULT_SIGBUS; /* Error */
        if (!entry->pagelist)
                return VM_FAULT_SIGBUS; /* Nothing allocated */

        offset = (unsigned long)vmf->virtual_address - vma->vm_start;
        map_offset = map->offset - (unsigned long)dev->sg->virtual;
        page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
        page = entry->pagelist[page_offset];
        get_page(page);
        vmf->page = page;

        return 0;
}

static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return drm_do_vm_fault(vma, vmf);
}

static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return drm_do_vm_shm_fault(vma, vmf);
}

static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return drm_do_vm_dma_fault(vma, vmf);
}

static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return drm_do_vm_sg_fault(vma, vmf);
}

/** AGP virtual memory operations */
static const struct vm_operations_struct drm_vm_ops = {
        .fault = drm_vm_fault,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

/** Shared virtual memory operations */
static const struct vm_operations_struct drm_vm_shm_ops = {
        .fault = drm_vm_shm_fault,
        .open = drm_vm_open,
        .close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static const struct vm_operations_struct drm_vm_dma_ops = {
        .fault = drm_vm_dma_fault,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static const struct vm_operations_struct drm_vm_sg_ops = {
        .fault = drm_vm_sg_fault,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

/**
 * \c open method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure tracking \p vma and add it to
 * drm_device::vmalist.
 */
void drm_vm_open_locked(struct vm_area_struct *vma)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_vma_entry *vma_entry;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_inc(&dev->vma_count);

        vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
        if (vma_entry) {
                vma_entry->vma = vma;
                vma_entry->pid = current->pid;
                list_add(&vma_entry->head, &dev->vmalist);
        }
}

static void drm_vm_open(struct vm_area_struct *vma)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;

        mutex_lock(&dev->struct_mutex);
        drm_vm_open_locked(vma);
        mutex_unlock(&dev->struct_mutex);
}

void drm_vm_close_locked(struct vm_area_struct *vma)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_vma_entry *pt, *temp;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_dec(&dev->vma_count);

        list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
                if (pt->vma == vma) {
                        list_del(&pt->head);
                        kfree(pt);
                        break;
                }
        }
}

/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search for the \p vma entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;

        mutex_lock(&dev->struct_mutex);
        drm_vm_close_locked(vma);
        mutex_unlock(&dev->struct_mutex);
}

/**
 * mmap DMA memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to drm_vm_dma_ops, drops
 * write permission for read-only PCI DMA buffers when the caller lacks
 * CAP_SYS_ADMIN, and calls drm_vm_open_locked().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev;
        struct drm_device_dma *dma;
        unsigned long length = vma->vm_end - vma->vm_start;

        dev = priv->minor->dev;
        dma = dev->dma;
        DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, vma->vm_pgoff);

        /* Length must match exact page count */
        if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
                return -EINVAL;
        }

        if (!capable(CAP_SYS_ADMIN) &&
            (dma->flags & _DRM_DMA_USE_PCI_RO)) {
                vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
                pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
                /* Ye gads this is ugly.  With more thought
                   we could move this up higher and use
                   `protection_map' instead.  */
                vma->vm_page_prot =
                    __pgprot(pte_val
                             (pte_wrprotect
                              (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
        }

        vma->vm_ops = &drm_vm_dma_ops;

        vma->vm_flags |= VM_RESERVED;   /* Don't swap */
        vma->vm_flags |= VM_DONTEXPAND;

        drm_vm_open_locked(vma);
        return 0;
}

static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
        return dev->hose->dense_mem_base;
#else
        return 0;
#endif
}

/**
 * mmap a DRM mapping.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so drm_mmap_dma() is called. Otherwise the map is looked up in
 * drm_device::map_hash, the restricted flag is checked against the caller's
 * capabilities, the virtual memory operations are set according to the mapping
 * type and the pages are remapped. Finally drm_vm_open_locked() is called.
 */
int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_local_map *map = NULL;
        resource_size_t offset = 0;
        struct drm_hash_item *hash;

        DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, vma->vm_pgoff);

        if (!priv->authenticated)
                return -EACCES;

        /* We check for "dma". On Apple's UniNorth, it's valid to have
         * the AGP mapped at physical address 0
         * --BenH.
         */
        if (!vma->vm_pgoff
#if __OS_HAS_AGP
            && (!dev->agp
                || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
            )
                return drm_mmap_dma(filp, vma);

        if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
                DRM_ERROR("Could not find map\n");
                return -EINVAL;
        }

        map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
        if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
                return -EPERM;

        /* Check for valid size. */
        if (map->size < vma->vm_end - vma->vm_start)
                return -EINVAL;

        if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
                vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
                pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
                /* Ye gads this is ugly.  With more thought
                   we could move this up higher and use
                   `protection_map' instead.  */
                vma->vm_page_prot =
                    __pgprot(pte_val
                             (pte_wrprotect
                              (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
        }

        switch (map->type) {
#if !defined(__arm__)
        case _DRM_AGP:
                if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
                        /*
                         * On some platforms we can't talk to bus dma address from the CPU, so for
                         * memory of type DRM_AGP, we'll deal with sorting out the real physical
                         * pages and mappings in fault()
                         */
#if defined(__powerpc__)
                        pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
                        vma->vm_ops = &drm_vm_ops;
                        break;
                }
                /* fall through to _DRM_FRAME_BUFFER... */
#endif
        case _DRM_FRAME_BUFFER:
        case _DRM_REGISTERS:
                offset = drm_core_get_reg_ofs(dev);
                vma->vm_flags |= VM_IO; /* not in core dump */
                vma->vm_page_prot = drm_io_prot(map->type, vma);
#if !defined(__arm__)
                if (io_remap_pfn_range(vma, vma->vm_start,
                                       (map->offset + offset) >> PAGE_SHIFT,
                                       vma->vm_end - vma->vm_start,
                                       vma->vm_page_prot))
                        return -EAGAIN;
#else
                if (remap_pfn_range(vma, vma->vm_start,
                                    (map->offset + offset) >> PAGE_SHIFT,
                                    vma->vm_end - vma->vm_start,
                                    vma->vm_page_prot))
                        return -EAGAIN;
#endif

                DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
                          " offset = 0x%llx\n",
                          map->type,
                          vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));

                vma->vm_ops = &drm_vm_ops;
                break;
        case _DRM_CONSISTENT:
                /* Consistent memory is really like shared memory. But
                 * it's allocated in a different way, so avoid fault */
                if (remap_pfn_range(vma, vma->vm_start,
                    page_to_pfn(virt_to_page(map->handle)),
                    vma->vm_end - vma->vm_start, vma->vm_page_prot))
                        return -EAGAIN;
                vma->vm_page_prot = drm_dma_prot(map->type, vma);
        /* fall through to _DRM_SHM */
        case _DRM_SHM:
                vma->vm_ops = &drm_vm_shm_ops;
                vma->vm_private_data = (void *)map;
                /* Don't let this area swap.  Change when
                   DRM_KERNEL advisory is supported. */
                vma->vm_flags |= VM_RESERVED;
                break;
        case _DRM_SCATTER_GATHER:
                vma->vm_ops = &drm_vm_sg_ops;
                vma->vm_private_data = (void *)map;
                vma->vm_flags |= VM_RESERVED;
                vma->vm_page_prot = drm_dma_prot(map->type, vma);
                break;
        default:
                return -EINVAL; /* This should never happen. */
        }
        vma->vm_flags |= VM_RESERVED;   /* Don't swap */
        vma->vm_flags |= VM_DONTEXPAND;

        drm_vm_open_locked(vma);
        return 0;
}

int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;
        int ret;

        if (drm_device_is_unplugged(dev))
                return -ENODEV;

        mutex_lock(&dev->struct_mutex);
        ret = drm_mmap_locked(filp, vma);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}
EXPORT_SYMBOL(drm_mmap);
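
/*
 * Usage sketch (illustrative only, not part of this file): from userspace, a
 * legacy DRM client ends up in drm_mmap() above by mmap()ing the DRM device
 * node with the offset token of a kernel-created map, typically discovered
 * via the DRM_IOCTL_GET_MAP ioctl. The names map_offset and map_size below
 * are hypothetical placeholders for values obtained that way:
 *
 *      int fd = open("/dev/dri/card0", O_RDWR);
 *      void *ptr = mmap(NULL, map_size, PROT_READ | PROT_WRITE,
 *                       MAP_SHARED, fd, map_offset);
 *
 * An offset of zero (except on Apple UniNorth AGP, where 0 can be a valid
 * AGP address) falls through to drm_mmap_dma(); any other offset is looked
 * up in dev->map_hash by drm_mmap_locked().
 */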