/*
 * drivers/video/tegra/nvmap/nvmap_ioctl.c
 *
 * User-space interface to nvmap
 *
 * Copyright (c) 2011, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/outercache.h>
#include <asm/tlbflush.h>

#include <mach/iovmm.h>
#include <mach/nvmap.h>

#include "nvmap_ioctl.h"
#include "nvmap.h"
#include "nvmap_common.h"

static ssize_t rw_handle(struct nvmap_client *client, struct nvmap_handle *h,
                         int is_read, unsigned long h_offs,
                         unsigned long sys_addr, unsigned long h_stride,
                         unsigned long sys_stride, unsigned long elem_size,
                         unsigned long count);

static int cache_maint(struct nvmap_client *client, struct nvmap_handle *h,
                       unsigned long start, unsigned long end, unsigned int op);

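/*
 * Pin or unpin a list of handles on behalf of user space.  For a single
 * handle the value in op.handles is used directly; for multiple handles the
 * array is copied in, using the on-stack buffer when it fits and a kmalloc'd
 * buffer otherwise.  On a successful pin the physical (or IOVMM) address of
 * each handle is written back to user space; if that write-back fails,
 * everything pinned here is unpinned again.
 */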
int nvmap_ioctl_pinop(struct file *filp, bool is_pin, void __user *arg)
{
        struct nvmap_pin_handle op;
        struct nvmap_handle *h;
        unsigned long on_stack[16];
        unsigned long *refs;
        unsigned long __user *output;
        unsigned int i;
        int err = 0;

        if (copy_from_user(&op, arg, sizeof(op)))
                return -EFAULT;

        if (!op.count)
                return -EINVAL;

        if (op.count > 1) {
                size_t bytes = op.count * sizeof(*refs);

                if (op.count > ARRAY_SIZE(on_stack))
                        refs = kmalloc(op.count * sizeof(*refs), GFP_KERNEL);
                else
                        refs = on_stack;

                if (!refs)
                        return -ENOMEM;

                if (copy_from_user(refs, (void *)op.handles, bytes)) {
                        err = -EFAULT;
                        goto out;
                }
        } else {
                refs = on_stack;
                on_stack[0] = (unsigned long)op.handles;
        }

        if (is_pin)
                err = nvmap_pin_ids(filp->private_data, op.count, refs);
        else
                nvmap_unpin_ids(filp->private_data, op.count, refs);

        /* skip the output stage on unpin */
        if (err || !is_pin)
                goto out;

        /* it is guaranteed that if nvmap_pin_ids returns 0, all of the
         * handle_ref objects are valid, so dereferencing them directly
         * here is safe */
        if (op.count > 1)
                output = (unsigned long __user *)op.addr;
        else {
                struct nvmap_pin_handle __user *tmp = arg;
                output = (unsigned long __user *)&(tmp->addr);
        }

        if (!output)
                goto out;

        for (i = 0; i < op.count && !err; i++) {
                unsigned long addr;

                h = (struct nvmap_handle *)refs[i];

                if (h->heap_pgalloc && h->pgalloc.contig)
                        addr = page_to_phys(h->pgalloc.pages[0]);
                else if (h->heap_pgalloc)
                        addr = h->pgalloc.area->iovm_start;
                else
                        addr = h->carveout->base;

                err = put_user(addr, &output[i]);
        }

        if (err)
                nvmap_unpin_ids(filp->private_data, op.count, refs);

out:
        if (refs != on_stack)
                kfree(refs);

        return err;
}

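/*
 * Translate a client-local handle into a global ID that can be shared with
 * another process and reopened with NVMAP_IOC_FROM_ID.  The ID is the kernel
 * address of the underlying nvmap_handle; when the caller owns the handle it
 * is marked global so that other clients may duplicate it.
 */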
int nvmap_ioctl_getid(struct file *filp, void __user *arg)
{
        struct nvmap_client *client = filp->private_data;
        struct nvmap_create_handle op;
        struct nvmap_handle *h = NULL;

        if (copy_from_user(&op, arg, sizeof(op)))
                return -EFAULT;

        if (!op.handle)
                return -EINVAL;

        h = nvmap_get_handle_id(client, op.handle);

        if (!h)
                return -EPERM;

        op.id = (__u32)h;
        if (client == h->owner)
                h->global = true;

        nvmap_handle_put(h);

        return copy_to_user(arg, &op, sizeof(op)) ? -EFAULT : 0;
}

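/*
 * Allocate backing memory for an existing handle.  The requested alignment
 * must be a power of two and is raised to at least PAGE_SIZE so that user
 * mappings never expose data from adjacent allocations.
 */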
int nvmap_ioctl_alloc(struct file *filp, void __user *arg)
{
        struct nvmap_alloc_handle op;
        struct nvmap_client *client = filp->private_data;

        if (copy_from_user(&op, arg, sizeof(op)))
                return -EFAULT;

        if (!op.handle)
                return -EINVAL;

        if (op.align & (op.align - 1))
                return -EINVAL;

        /* user-space handles are aligned to page boundaries, to prevent
         * data leakage. */
        op.align = max_t(size_t, op.align, PAGE_SIZE);

        return nvmap_alloc_handle_id(client, op.handle, op.heap_mask,
                                     op.align, op.flags);
}

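/*
 * Create a new handle (NVMAP_IOC_CREATE) or duplicate an existing one from a
 * global ID (NVMAP_IOC_FROM_ID), then return the new handle value to user
 * space.  If the final copy_to_user fails, the handle is released again.
 */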
int nvmap_ioctl_create(struct file *filp, unsigned int cmd, void __user *arg)
{
        struct nvmap_create_handle op;
        struct nvmap_handle_ref *ref = NULL;
        struct nvmap_client *client = filp->private_data;
        int err = 0;

        if (copy_from_user(&op, arg, sizeof(op)))
                return -EFAULT;

        if (!client)
                return -ENODEV;

        if (cmd == NVMAP_IOC_CREATE) {
                ref = nvmap_create_handle(client, PAGE_ALIGN(op.size));
                if (!IS_ERR(ref))
                        ref->handle->orig_size = op.size;
        } else if (cmd == NVMAP_IOC_FROM_ID) {
                ref = nvmap_duplicate_handle_id(client, op.id);
        } else {
                return -EINVAL;
        }

        if (IS_ERR(ref))
                return PTR_ERR(ref);

        op.handle = nvmap_ref_to_id(ref);
        if (copy_to_user(arg, &op, sizeof(op))) {
                err = -EFAULT;
                nvmap_free_handle_id(client, op.handle);
        }

        return err;
}

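/*
 * Attach a handle to a VMA that was previously created by mmap() on the
 * nvmap device.  The VMA must have been created by this driver, must match
 * the requested address and length exactly, and must not already be bound to
 * a different handle.  Optionally converts an uncached/write-combined handle
 * to inner-cacheable before the page protection is derived.
 */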
int nvmap_map_into_caller_ptr(struct file *filp, void __user *arg)
{
        struct nvmap_client *client = filp->private_data;
        struct nvmap_map_caller op;
        struct nvmap_vma_priv *vpriv;
        struct vm_area_struct *vma;
        struct nvmap_handle *h = NULL;
        int err = 0;

        if (copy_from_user(&op, arg, sizeof(op)))
                return -EFAULT;

        if (!op.handle)
                return -EINVAL;

        h = nvmap_get_handle_id(client, op.handle);

        if (!h)
                return -EPERM;

        down_read(&current->mm->mmap_sem);

        vma = find_vma(current->mm, op.addr);
        if (!vma || !vma->vm_private_data) {
                err = -ENOMEM;
                goto out;
        }

        if (op.offset & ~PAGE_MASK) {
                err = -EFAULT;
                goto out;
        }

        if ((op.offset + op.length) > h->size) {
                err = -EADDRNOTAVAIL;
                goto out;
        }

        vpriv = vma->vm_private_data;
        BUG_ON(!vpriv);

        /* the VMA must exactly match the requested mapping operation, and
         * the targeted VMA must have been created by this driver
         */
        if ((vma->vm_start != op.addr) || !is_nvmap_vma(vma) ||
            (vma->vm_end - vma->vm_start != op.length)) {
                err = -EPERM;
                goto out;
        }

        /* verify that each mmap() system call creates a unique VMA */

        if (vpriv->handle && (h == vpriv->handle)) {
                goto out;
        } else if (vpriv->handle) {
                err = -EADDRNOTAVAIL;
                goto out;
        }

        nvmap_usecount_inc(h);

        if (!h->heap_pgalloc && (h->carveout->base & ~PAGE_MASK)) {
                nvmap_usecount_dec(h);
                err = -EFAULT;
                goto out;
        }

        vpriv->handle = h;
        vpriv->offs = op.offset;

        if (op.flags == NVMAP_HANDLE_INNER_CACHEABLE) {
                if (h->orig_size & ~PAGE_MASK) {
                        pr_err("\n%s:attempt to convert a buffer from uc/wc to"
                                " wb, whose size is not a multiple of page size."
                                " request ignored.\n", __func__);
                } else {
                        wmb();
                        /* override allocation time cache coherency attributes. */
                        h->flags &= (~NVMAP_HANDLE_CACHEABLE);
                        h->flags |= NVMAP_HANDLE_INNER_CACHEABLE;
                }
        }
        vma->vm_page_prot = nvmap_pgprot(h, vma->vm_page_prot);

out:
        up_read(&current->mm->mmap_sem);

        if (err)
                nvmap_handle_put(h);
        return err;
}

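/*
 * Query a handle property: original (unaligned) size, effective alignment,
 * physical base address (only meaningful while the handle is pinned), or the
 * heap the allocation came from.
 */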
int nvmap_ioctl_get_param(struct file *filp, void __user *arg)
{
        struct nvmap_handle_param op;
        struct nvmap_client *client = filp->private_data;
        struct nvmap_handle *h;
        int err = 0;

        if (copy_from_user(&op, arg, sizeof(op)))
                return -EFAULT;

        h = nvmap_get_handle_id(client, op.handle);
        if (!h)
                return -EINVAL;

        switch (op.param) {
        case NVMAP_HANDLE_PARAM_SIZE:
                op.result = h->orig_size;
                break;
        case NVMAP_HANDLE_PARAM_ALIGNMENT:
                mutex_lock(&h->lock);
                if (!h->alloc)
                        op.result = 0;
                else if (h->heap_pgalloc)
                        op.result = PAGE_SIZE;
                else if (h->carveout->base)
                        op.result = (h->carveout->base & -h->carveout->base);
                else
                        op.result = SZ_4M;
                mutex_unlock(&h->lock);
                break;
        case NVMAP_HANDLE_PARAM_BASE:
                if (WARN_ON(!h->alloc || !atomic_add_return(0, &h->pin)))
                        op.result = -1ul;
                else if (!h->heap_pgalloc) {
                        mutex_lock(&h->lock);
                        op.result = h->carveout->base;
                        mutex_unlock(&h->lock);
                } else if (h->pgalloc.contig)
                        op.result = page_to_phys(h->pgalloc.pages[0]);
                else if (h->pgalloc.area)
                        op.result = h->pgalloc.area->iovm_start;
                else
                        op.result = -1ul;
                break;
        case NVMAP_HANDLE_PARAM_HEAP:
                if (!h->alloc)
                        op.result = 0;
                else if (!h->heap_pgalloc) {
                        mutex_lock(&h->lock);
                        op.result = nvmap_carveout_usage(client, h->carveout);
                        mutex_unlock(&h->lock);
                } else if (h->pgalloc.contig)
                        op.result = NVMAP_HEAP_SYSMEM;
                else
                        op.result = NVMAP_HEAP_IOVMM;
                break;
        default:
                err = -EINVAL;
                break;
        }

        if (!err && copy_to_user(arg, &op, sizeof(op)))
                err = -EFAULT;

        nvmap_handle_put(h);
        return err;
}

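/*
 * Copy data between user memory and a handle using the strided parameters in
 * struct nvmap_rw_handle.  The number of bytes actually transferred is
 * written back to the 'count' field; a short transfer returns -EINTR.
 */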
int nvmap_ioctl_rw_handle(struct file *filp, int is_read, void __user *arg)
{
        struct nvmap_client *client = filp->private_data;
        struct nvmap_rw_handle __user *uarg = arg;
        struct nvmap_rw_handle op;
        struct nvmap_handle *h;
        ssize_t copied;
        int err = 0;

        if (copy_from_user(&op, arg, sizeof(op)))
                return -EFAULT;

        if (!op.handle || !op.addr || !op.count || !op.elem_size)
                return -EINVAL;

        h = nvmap_get_handle_id(client, op.handle);
        if (!h)
                return -EPERM;

        nvmap_usecount_inc(h);

        copied = rw_handle(client, h, is_read, op.offset,
                           (unsigned long)op.addr, op.hmem_stride,
                           op.user_stride, op.elem_size, op.count);

        if (copied < 0) {
                err = copied;
                copied = 0;
        } else if (copied < (op.count * op.elem_size))
                err = -EINTR;

        __put_user(copied, &uarg->count);

        nvmap_usecount_dec(h);

        nvmap_handle_put(h);

        return err;
}

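/*
 * Perform cache maintenance (writeback, invalidate, or both) on a range of a
 * handle that the caller has mapped.  The user address is translated into a
 * handle offset via the nvmap VMA it falls in, so the range must lie inside
 * a mapping created by this driver for the same handle.
 */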
int nvmap_ioctl_cache_maint(struct file *filp, void __user *arg)
{
        struct nvmap_client *client = filp->private_data;
        struct nvmap_cache_op op;
        struct vm_area_struct *vma;
        struct nvmap_vma_priv *vpriv;
        unsigned long start;
        unsigned long end;
        int err = 0;

        if (copy_from_user(&op, arg, sizeof(op)))
                return -EFAULT;

        if (!op.handle || !op.addr || op.op < NVMAP_CACHE_OP_WB ||
            op.op > NVMAP_CACHE_OP_WB_INV)
                return -EINVAL;

        down_read(&current->mm->mmap_sem);

        vma = find_vma(current->mm, (unsigned long)op.addr);
        if (!vma || !is_nvmap_vma(vma) ||
            (unsigned long)op.addr + op.len > vma->vm_end) {
                err = -EADDRNOTAVAIL;
                goto out;
        }

        vpriv = (struct nvmap_vma_priv *)vma->vm_private_data;

        if ((unsigned long)vpriv->handle != op.handle) {
                err = -EFAULT;
                goto out;
        }

        start = (unsigned long)op.addr - vma->vm_start;
        end = start + op.len;

        err = cache_maint(client, vpriv->handle, start, end, op.op);
out:
        up_read(&current->mm->mmap_sem);
        return err;
}

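/* Release a handle reference held by this client; a zero argument is a no-op. */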
int nvmap_ioctl_free(struct file *filp, unsigned long arg)
{
        struct nvmap_client *client = filp->private_data;

        if (!arg)
                return 0;

        nvmap_free_handle_id(client, arg);
        return 0;
}

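/*
 * Low-level helpers: inner_cache_maint() operates on the inner (L1/CPU)
 * cache through kernel virtual addresses, outer_cache_maint() on the outer
 * cache through physical addresses.
 */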
static void inner_cache_maint(unsigned int op, void *vaddr, size_t size)
{
        if (op == NVMAP_CACHE_OP_WB_INV)
                dmac_flush_range(vaddr, vaddr + size);
        else if (op == NVMAP_CACHE_OP_INV)
                dmac_map_area(vaddr, size, DMA_FROM_DEVICE);
        else
                dmac_map_area(vaddr, size, DMA_TO_DEVICE);
}

static void outer_cache_maint(unsigned int op, unsigned long paddr, size_t size)
{
        if (op == NVMAP_CACHE_OP_WB_INV)
                outer_flush_range(paddr, paddr + size);
        else if (op == NVMAP_CACHE_OP_INV)
                outer_inv_range(paddr, paddr + size);
        else
                outer_clean_range(paddr, paddr + size);
}

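/*
 * Walk a page-allocated (non-carveout) handle page by page, temporarily
 * mapping each page through the caller's scratch PTE when inner maintenance
 * is requested, and operating on the physical address for the outer cache.
 */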
static void heap_page_cache_maint(struct nvmap_client *client,
        struct nvmap_handle *h, unsigned long start, unsigned long end,
        unsigned int op, bool inner, bool outer, pte_t **pte,
        unsigned long kaddr, pgprot_t prot)
{
        struct page *page;
        unsigned long paddr;
        unsigned long next;
        unsigned long off;
        size_t size;

        while (start < end) {
                page = h->pgalloc.pages[start >> PAGE_SHIFT];
                next = min(((start + PAGE_SIZE) & PAGE_MASK), end);
                off = start & ~PAGE_MASK;
                size = next - start;
                paddr = page_to_phys(page) + off;

                if (inner) {
                        void *vaddr = (void *)kaddr + off;
                        BUG_ON(!pte);
                        BUG_ON(!kaddr);
                        set_pte_at(&init_mm, kaddr, *pte,
                                pfn_pte(__phys_to_pfn(paddr), prot));
                        flush_tlb_kernel_page(kaddr);
                        inner_cache_maint(op, vaddr, size);
                }

                if (outer)
                        outer_cache_maint(op, paddr, size);
                start = next;
        }
}

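/*
 * For large writeback/flush requests it is cheaper to clean or flush the
 * whole inner cache by set/way than to walk the range line by line; the
 * outer cache still has to be maintained by physical range.  Returns true if
 * the request was handled here.  Invalidate-only requests always take the
 * slow path, since invalidating the whole cache would discard unrelated
 * dirty data.
 */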
static bool fast_cache_maint(struct nvmap_client *client, struct nvmap_handle *h,
        unsigned long start, unsigned long end, unsigned int op)
{
        bool ret = false;

        if ((op == NVMAP_CACHE_OP_INV) ||
            ((end - start) < FLUSH_CLEAN_BY_SET_WAY_THRESHOLD))
                goto out;

        if (op == NVMAP_CACHE_OP_WB_INV) {
                inner_flush_cache_all();
        } else if (op == NVMAP_CACHE_OP_WB) {
                inner_clean_cache_all();
        }

        if (h->heap_pgalloc && (h->flags != NVMAP_HANDLE_INNER_CACHEABLE)) {
                heap_page_cache_maint(client, h, start, end, op,
                                false, true, NULL, 0, 0);
        } else if (h->flags != NVMAP_HANDLE_INNER_CACHEABLE) {
                start += h->carveout->base;
                end += h->carveout->base;
                outer_cache_maint(op, start, end - start);
        }
        ret = true;
out:
        return ret;
}

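/*
 * Common cache maintenance entry point.  Uncached and write-combined handles
 * need no maintenance; otherwise try the whole-cache fast path, then fall
 * back to mapping the handle through a scratch PTE one page at a time and
 * maintaining the inner and outer caches by range.
 */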
static int cache_maint(struct nvmap_client *client, struct nvmap_handle *h,
                       unsigned long start, unsigned long end, unsigned int op)
{
        pgprot_t prot;
        pte_t **pte = NULL;
        unsigned long kaddr;
        unsigned long loop;
        int err = 0;

        h = nvmap_handle_get(h);
        if (!h)
                return -EFAULT;

        if (!h->alloc) {
                err = -EFAULT;
                goto out;
        }

        if (h->flags == NVMAP_HANDLE_UNCACHEABLE ||
            h->flags == NVMAP_HANDLE_WRITE_COMBINE ||
            start == end)
                goto out;

        if (fast_cache_maint(client, h, start, end, op))
                goto out;

        prot = nvmap_pgprot(h, pgprot_kernel);
        pte = nvmap_alloc_pte(client->dev, (void **)&kaddr);
        if (IS_ERR(pte)) {
                err = PTR_ERR(pte);
                pte = NULL;
                goto out;
        }

        if (h->heap_pgalloc) {
                heap_page_cache_maint(client, h, start, end, op, true,
                        (h->flags == NVMAP_HANDLE_INNER_CACHEABLE) ? false : true,
                        pte, kaddr, prot);
                goto out;
        }

        if (start > h->size || end > h->size) {
                nvmap_warn(client, "cache maintenance outside handle\n");
                err = -EINVAL;
                goto out;
        }

        /* lock carveout from relocation by mapcount */
        nvmap_usecount_inc(h);

        start += h->carveout->base;
        end += h->carveout->base;

        loop = start;

        while (loop < end) {
                unsigned long next = (loop + PAGE_SIZE) & PAGE_MASK;
                void *base = (void *)kaddr + (loop & ~PAGE_MASK);
                next = min(next, end);

                set_pte_at(&init_mm, kaddr, *pte,
                           pfn_pte(__phys_to_pfn(loop), prot));
                flush_tlb_kernel_page(kaddr);

                inner_cache_maint(op, base, next - loop);
                loop = next;
        }

        if (h->flags != NVMAP_HANDLE_INNER_CACHEABLE)
                outer_cache_maint(op, start, end - start);

        /* unlock carveout */
        nvmap_usecount_dec(h);

out:
        if (pte)
                nvmap_free_pte(client->dev, pte);
        nvmap_handle_put(h);
        wmb();
        return err;
}

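/*
 * Copy up to one page at a time between user space and the handle, mapping
 * each page of the handle through the caller's scratch PTE.  Pages of a
 * page-allocated handle are pinned with get_page() around the copy.
 */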
static int rw_handle_page(struct nvmap_handle *h, int is_read,
                          unsigned long start, unsigned long rw_addr,
                          unsigned long bytes, unsigned long kaddr, pte_t *pte)
{
        pgprot_t prot = nvmap_pgprot(h, pgprot_kernel);
        unsigned long end = start + bytes;
        int err = 0;

        while (!err && start < end) {
                struct page *page = NULL;
                unsigned long phys;
                size_t count;
                void *src;

                if (!h->heap_pgalloc) {
                        phys = h->carveout->base + start;
                } else {
                        page = h->pgalloc.pages[start >> PAGE_SHIFT];
                        BUG_ON(!page);
                        get_page(page);
                        phys = page_to_phys(page) + (start & ~PAGE_MASK);
                }

                set_pte_at(&init_mm, kaddr, pte,
                           pfn_pte(__phys_to_pfn(phys), prot));
                flush_tlb_kernel_page(kaddr);

                src = (void *)kaddr + (phys & ~PAGE_MASK);
                phys = PAGE_SIZE - (phys & ~PAGE_MASK);
                count = min_t(size_t, end - start, phys);

                if (is_read)
                        err = copy_to_user((void *)rw_addr, src, count);
                else
                        err = copy_from_user(src, (void *)rw_addr, count);

                if (err)
                        err = -EFAULT;

                rw_addr += count;
                start += count;

                if (page)
                        put_page(page);
        }

        return err;
}

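/*
 * rw_handle(): copy 'count' elements of 'elem_size' bytes between user
 * memory at 'sys_addr' and the handle starting at 'h_offs'.  Consecutive
 * elements are 'sys_stride' bytes apart in user memory and 'h_stride' bytes
 * apart in the handle, so e.g. a sub-rectangle of a pitch-linear surface can
 * be transferred in one call.  When all three strides are equal, the loop is
 * collapsed into a single contiguous copy.
 */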
static ssize_t rw_handle(struct nvmap_client *client, struct nvmap_handle *h,
                         int is_read, unsigned long h_offs,
                         unsigned long sys_addr, unsigned long h_stride,
                         unsigned long sys_stride, unsigned long elem_size,
                         unsigned long count)
{
        ssize_t copied = 0;
        pte_t **pte;
        void *addr;
        int ret = 0;

        if (!elem_size)
                return -EINVAL;

        if (!h->alloc)
                return -EFAULT;

        if (elem_size == h_stride && elem_size == sys_stride) {
                elem_size *= count;
                h_stride = elem_size;
                sys_stride = elem_size;
                count = 1;
        }

        pte = nvmap_alloc_pte(client->dev, &addr);
        if (IS_ERR(pte))
                return PTR_ERR(pte);

        while (count--) {
                if (h_offs + elem_size > h->size) {
                        nvmap_warn(client, "read/write outside of handle\n");
                        ret = -EFAULT;
                        break;
                }

                if (is_read)
                        cache_maint(client, h, h_offs,
                                    h_offs + elem_size, NVMAP_CACHE_OP_INV);

                ret = rw_handle_page(h, is_read, h_offs, sys_addr,
                                     elem_size, (unsigned long)addr, *pte);

                if (ret)
                        break;

                if (!is_read)
                        cache_maint(client, h, h_offs,
                                    h_offs + elem_size, NVMAP_CACHE_OP_WB);

                copied += elem_size;
                sys_addr += sys_stride;
                h_offs += h_stride;
        }

        nvmap_free_pte(client->dev, pte);
        return ret ?: copied;
}