/**
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#if defined(__ia64__)
#include <linux/efi.h>
#endif

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);

/**
 * \c nopage method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.
 */
#if __OS_HAS_AGP
static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
                                                unsigned long address)
{
        drm_file_t *priv = vma->vm_file->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_map_t *map = NULL;
        drm_map_list_t *r_list;
        drm_hash_item_t *hash;

        /*
         * Find the right map
         */
        if (!drm_core_has_AGP(dev))
                goto vm_nopage_error;

        if (!dev->agp || !dev->agp->cant_use_aperture)
                goto vm_nopage_error;

        if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff << PAGE_SHIFT, &hash))
                goto vm_nopage_error;

        r_list = drm_hash_entry(hash, drm_map_list_t, hash);
        map = r_list->map;

        if (map && map->type == _DRM_AGP) {
                unsigned long offset = address - vma->vm_start;
                unsigned long baddr = map->offset + offset;
                struct drm_agp_mem *agpmem;
                struct page *page;

#ifdef __alpha__
                /*
                 * Adjust to a bus-relative address
                 */
                baddr -= dev->hose->mem_space->start;
#endif

                /*
                 * It's AGP memory - find the real physical page to map
                 */
                for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next) {
                        if (agpmem->bound <= baddr &&
                            agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
                                break;
                }

                if (!agpmem)
                        goto vm_nopage_error;

                /*
                 * Get the page, inc the use count, and return it
                 */
                offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
                page = virt_to_page(__va(agpmem->memory->memory[offset]));
                get_page(page);

                DRM_DEBUG
                    ("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
                     baddr, __va(agpmem->memory->memory[offset]), offset,
                     page_count(page));

                return page;
        }
      vm_nopage_error:
        return NOPAGE_SIGBUS;   /* Disallow mremap */
}
#else                           /* __OS_HAS_AGP */
static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
                                                unsigned long address)
{
        return NOPAGE_SIGBUS;
}
#endif                          /* __OS_HAS_AGP */

/**
 * \c nopage method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
                                                    unsigned long address)
{
        drm_map_t *map = (drm_map_t *) vma->vm_private_data;
        unsigned long offset;
        unsigned long i;
        struct page *page;

        if (address > vma->vm_end)
                return NOPAGE_SIGBUS;   /* Disallow mremap */
        if (!map)
                return NOPAGE_SIGBUS;   /* Nothing allocated */

        offset = address - vma->vm_start;
        i = (unsigned long)map->handle + offset;
        page = (map->type == _DRM_CONSISTENT) ?
                virt_to_page((void *)i) : vmalloc_to_page((void *)i);
        if (!page)
                return NOPAGE_SIGBUS;
        get_page(page);

        DRM_DEBUG("shm_nopage 0x%lx\n", address);
        return page;
}

/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
        drm_file_t *priv = vma->vm_file->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_vma_entry_t *pt, *prev, *next;
        drm_map_t *map;
        drm_map_list_t *r_list;
        struct list_head *list;
        int found_maps = 0;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_dec(&dev->vma_count);

        map = vma->vm_private_data;

        mutex_lock(&dev->struct_mutex);
        for (pt = dev->vmalist, prev = NULL; pt; pt = next) {
                next = pt->next;
                if (pt->vma->vm_private_data == map)
                        found_maps++;
                if (pt->vma == vma) {
                        if (prev) {
                                prev->next = pt->next;
                        } else {
                                dev->vmalist = pt->next;
                        }
                        drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
                } else {
                        prev = pt;
                }
        }
        /* We were the only map that was found */
        if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
                /* Check to see if we are in the maplist; if we are not, then
                 * we delete this mapping's information.
                 */
                found_maps = 0;
                list = &dev->maplist->head;
                list_for_each(list, &dev->maplist->head) {
                        r_list = list_entry(list, drm_map_list_t, head);
                        if (r_list->map == map)
                                found_maps++;
                }

                if (!found_maps) {
                        drm_dma_handle_t dmah;

                        switch (map->type) {
                        case _DRM_REGISTERS:
                        case _DRM_FRAME_BUFFER:
                                if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
                                        int retcode;
                                        retcode = mtrr_del(map->mtrr,
                                                           map->offset,
                                                           map->size);
                                        DRM_DEBUG("mtrr_del = %d\n", retcode);
                                }
                                drm_ioremapfree(map->handle, map->size, dev);
                                break;
                        case _DRM_SHM:
                                vfree(map->handle);
                                break;
                        case _DRM_AGP:
                        case _DRM_SCATTER_GATHER:
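                                /* Nothing to free here; the backing memory
                                 * for AGP and SG maps is released elsewhere.
                                 */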
                                break;
                        case _DRM_CONSISTENT:
                                dmah.vaddr = map->handle;
                                dmah.busaddr = map->offset;
                                dmah.size = map->size;
                                __drm_pci_free(dev, &dmah);
                                break;
                        }
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                }
        }
        mutex_unlock(&dev->struct_mutex);
}

/**
 * \c nopage method for DMA virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
 */
static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
                                                    unsigned long address)
{
        drm_file_t *priv = vma->vm_file->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_device_dma_t *dma = dev->dma;
        unsigned long offset;
        unsigned long page_nr;
        struct page *page;

        if (!dma)
                return NOPAGE_SIGBUS;   /* Error */
        if (address > vma->vm_end)
                return NOPAGE_SIGBUS;   /* Disallow mremap */
        if (!dma->pagelist)
                return NOPAGE_SIGBUS;   /* Nothing allocated */

        offset = address - vma->vm_start;       /* vm_[pg]off[set] should be 0 */
        page_nr = offset >> PAGE_SHIFT;
        page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));

        get_page(page);

        DRM_DEBUG("dma_nopage 0x%lx (page %lu)\n", address, page_nr);
        return page;
}

/**
 * \c nopage method for scatter-gather virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
 */
static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
                                                   unsigned long address)
{
        drm_map_t *map = (drm_map_t *) vma->vm_private_data;
        drm_file_t *priv = vma->vm_file->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_sg_mem_t *entry = dev->sg;
        unsigned long offset;
        unsigned long map_offset;
        unsigned long page_offset;
        struct page *page;

        if (!entry)
                return NOPAGE_SIGBUS;   /* Error */
        if (address > vma->vm_end)
                return NOPAGE_SIGBUS;   /* Disallow mremap */
        if (!entry->pagelist)
                return NOPAGE_SIGBUS;   /* Nothing allocated */

        offset = address - vma->vm_start;
        map_offset = map->offset - (unsigned long)dev->sg->virtual;
        page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
        page = entry->pagelist[page_offset];
        get_page(page);

        return page;
}

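/*
 * Thin wrappers that adapt the helpers above to the vm_operations_struct
 * ->nopage signature: report a minor fault when the caller asks for the
 * fault type, then delegate to the matching drm_do_vm_*_nopage() helper.
 */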
static struct page *drm_vm_nopage(struct vm_area_struct *vma,
                                  unsigned long address, int *type)
{
        if (type)
                *type = VM_FAULT_MINOR;
        return drm_do_vm_nopage(vma, address);
}

static struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
                                      unsigned long address, int *type)
{
        if (type)
                *type = VM_FAULT_MINOR;
        return drm_do_vm_shm_nopage(vma, address);
}

static struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
                                      unsigned long address, int *type)
{
        if (type)
                *type = VM_FAULT_MINOR;
        return drm_do_vm_dma_nopage(vma, address);
}

static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
                                     unsigned long address, int *type)
{
        if (type)
                *type = VM_FAULT_MINOR;
        return drm_do_vm_sg_nopage(vma, address);
}

/** AGP virtual memory operations */
static struct vm_operations_struct drm_vm_ops = {
        .nopage = drm_vm_nopage,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

/** Shared virtual memory operations */
static struct vm_operations_struct drm_vm_shm_ops = {
        .nopage = drm_vm_shm_nopage,
        .open = drm_vm_open,
        .close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static struct vm_operations_struct drm_vm_dma_ops = {
        .nopage = drm_vm_dma_nopage,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static struct vm_operations_struct drm_vm_sg_ops = {
        .nopage = drm_vm_sg_nopage,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

/**
 * \c open method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure as the \p vma private data entry and
 * add it to drm_device::vmalist.
 */
static void drm_vm_open(struct vm_area_struct *vma)
{
        drm_file_t *priv = vma->vm_file->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_vma_entry_t *vma_entry;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_inc(&dev->vma_count);

        vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
        if (vma_entry) {
                mutex_lock(&dev->struct_mutex);
                vma_entry->vma = vma;
                vma_entry->next = dev->vmalist;
                vma_entry->pid = current->pid;
                dev->vmalist = vma_entry;
                mutex_unlock(&dev->struct_mutex);
        }
}

/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
        drm_file_t *priv = vma->vm_file->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_vma_entry_t *pt, *prev;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_dec(&dev->vma_count);

        mutex_lock(&dev->struct_mutex);
        for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
                if (pt->vma == vma) {
                        if (prev) {
                                prev->next = pt->next;
                        } else {
                                dev->vmalist = pt->next;
                        }
                        drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
                        break;
                }
        }
        mutex_unlock(&dev->struct_mutex);
}

/**
 * mmap DMA memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to vm_dma_ops, the file
 * pointer, and calls vm_open().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev;
        drm_device_dma_t *dma;
        unsigned long length = vma->vm_end - vma->vm_start;

        lock_kernel();
        dev = priv->head->dev;
        dma = dev->dma;
        DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, vma->vm_pgoff << PAGE_SHIFT);

        /* Length must match exact page count */
        if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
                unlock_kernel();
                return -EINVAL;
        }
        unlock_kernel();

        vma->vm_ops = &drm_vm_dma_ops;

        vma->vm_flags |= VM_RESERVED;   /* Don't swap */

        vma->vm_file = filp;    /* Needed for drm_vm_open() */
        drm_vm_open(vma);
        return 0;
}

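/**
 * Return the base offset of a map.
 *
 * \param map DRM map.
 * \return drm_map::offset.
 *
 * Typically installed by drivers as their drm_driver::get_map_ofs hook.
 */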
unsigned long drm_core_get_map_ofs(drm_map_t * map)
{
        return map->offset;
}

EXPORT_SYMBOL(drm_core_get_map_ofs);

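/**
 * Return the register-space adjustment applied to a map offset.
 *
 * \param dev DRM device.
 * \return on Alpha, the bus-relative adjustment derived from the hose
 * (dense_mem_base - mem_space->start); zero on all other architectures.
 *
 * drm_mmap() adds this value (via the driver's get_reg_ofs hook) to the map
 * offset before remapping register and frame buffer ranges.
 */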
unsigned long drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
        return dev->hose->dense_mem_base - dev->hose->mem_space->start;
#else
        return 0;
#endif
}

EXPORT_SYMBOL(drm_core_get_reg_ofs);

/**
 * mmap DRM memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it is a
 * DMA area, so drm_mmap_dma() is called. Otherwise the map is looked up in
 * drm_device::maplist, the restricted flag is checked, the virtual memory
 * operations are set according to the mapping type, and the pages are
 * remapped. Finally the file pointer is set and drm_vm_open() is called.
 */
int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_map_t *map = NULL;
        unsigned long offset = 0;
        drm_hash_item_t *hash;

        DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, vma->vm_pgoff << PAGE_SHIFT);

        if (!priv->authenticated)
                return -EACCES;

        /* We check for "dma". On Apple's UniNorth, it's valid to have
         * the AGP mapped at physical address 0
         * --BenH.
         */
        if (!(vma->vm_pgoff << PAGE_SHIFT)
#if __OS_HAS_AGP
            && (!dev->agp
                || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
            )
                return drm_mmap_dma(filp, vma);

        if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff << PAGE_SHIFT, &hash)) {
                DRM_ERROR("Could not find map\n");
                return -EINVAL;
        }

        map = drm_hash_entry(hash, drm_map_list_t, hash)->map;
        if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
                return -EPERM;

        /* Check for valid size. */
        if (map->size != vma->vm_end - vma->vm_start)
                return -EINVAL;

        if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
                vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
                pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
                /* Ye gads this is ugly.  With more thought
                   we could move this up higher and use
                   `protection_map' instead.  */
                vma->vm_page_prot =
                    __pgprot(pte_val
                             (pte_wrprotect
                              (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
        }

        switch (map->type) {
        case _DRM_AGP:
                if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
                        /*
                         * On some platforms the CPU cannot access AGP memory at its bus
                         * address, so for memory of type _DRM_AGP we deal with sorting
                         * out the real physical pages and mappings in nopage()
                         */
#if defined(__powerpc__)
                        pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
                        vma->vm_ops = &drm_vm_ops;
                        break;
                }
                /* fall through to _DRM_FRAME_BUFFER... */
        case _DRM_FRAME_BUFFER:
        case _DRM_REGISTERS:
#if defined(__i386__) || defined(__x86_64__)
                if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) {
                        pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
                        pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
                }
#elif defined(__powerpc__)
                pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
                if (map->type == _DRM_REGISTERS)
                        pgprot_val(vma->vm_page_prot) |= _PAGE_GUARDED;
#endif
                vma->vm_flags |= VM_IO; /* not in core dump */
#if defined(__ia64__)
                if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
                        vma->vm_page_prot =
                            pgprot_writecombine(vma->vm_page_prot);
                else
                        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
                offset = dev->driver->get_reg_ofs(dev);
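                /* Both branches below issue the same io_remap_pfn_range()
                 * call; sparc additionally forces the mapping to be
                 * non-cached before remapping.
                 */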
#ifdef __sparc__
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
                if (io_remap_pfn_range(vma, vma->vm_start,
                                       (map->offset + offset) >> PAGE_SHIFT,
                                       vma->vm_end - vma->vm_start,
                                       vma->vm_page_prot))
#else
                if (io_remap_pfn_range(vma, vma->vm_start,
                                       (map->offset + offset) >> PAGE_SHIFT,
                                       vma->vm_end - vma->vm_start,
                                       vma->vm_page_prot))
#endif
                        return -EAGAIN;
                DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
                          " offset = 0x%lx\n",
                          map->type,
                          vma->vm_start, vma->vm_end, map->offset + offset);
                vma->vm_ops = &drm_vm_ops;
                break;
        case _DRM_SHM:
        case _DRM_CONSISTENT:
                /* Consistent memory is really like shared memory. It's only
                 * allocated in a different way */
                vma->vm_ops = &drm_vm_shm_ops;
                vma->vm_private_data = (void *)map;
                /* Don't let this area swap.  Change when
                   DRM_KERNEL advisory is supported. */
                vma->vm_flags |= VM_RESERVED;
                break;
        case _DRM_SCATTER_GATHER:
                vma->vm_ops = &drm_vm_sg_ops;
                vma->vm_private_data = (void *)map;
                vma->vm_flags |= VM_RESERVED;
                break;
        default:
                return -EINVAL; /* This should never happen. */
        }
        vma->vm_flags |= VM_RESERVED;   /* Don't swap */

        vma->vm_file = filp;    /* Needed for drm_vm_open() */
        drm_vm_open(vma);
        return 0;
}

EXPORT_SYMBOL(drm_mmap);
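
/*
 * Usage sketch (illustrative only): drm_mmap() is reached by calling mmap()
 * on a DRM file descriptor.  The offset selects a registered map (its hash
 * key is vm_pgoff << PAGE_SHIFT), the length must equal the map's size, and
 * an offset of zero falls through to the DMA path.  map_offset and map_size
 * below are placeholders for values obtained from the driver or from a
 * libdrm drmMap()-style lookup.
 *
 *	int fd = open("/dev/dri/card0", O_RDWR);
 *	void *ptr = mmap(NULL, map_size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, map_offset);
 */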