linux-3.10.git: drivers/video/tegra/nvmap/nvmap_ioctl.c (commit be0b1d258926fce3d9e9407b5f710cc36c20dbac)
1 /*
2  * drivers/video/tegra/nvmap/nvmap_ioctl.c
3  *
4  * User-space interface to nvmap
5  *
6  * Copyright (c) 2011-2013, NVIDIA CORPORATION. All rights reserved.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful, but WITHOUT
14  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
16  * more details.
17  *
18  * You should have received a copy of the GNU General Public License along
19  * with this program; if not, write to the Free Software Foundation, Inc.,
20  * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
21  */
22
23 #include <linux/anon_inodes.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/export.h>
26 #include <linux/fs.h>
27 #include <linux/kernel.h>
28 #include <linux/slab.h>
29 #include <linux/uaccess.h>
30 #include <linux/nvmap.h>
31
32 #include <asm/cacheflush.h>
33 #include <asm/memory.h>
34 #ifndef CONFIG_ARM64
35 #include <asm/outercache.h>
36 #endif
37 #include <asm/tlbflush.h>
38
39 #include <mach/iovmm.h>
40 #include <trace/events/nvmap.h>
41 #include <linux/vmalloc.h>
42
43 #include "nvmap_ioctl.h"
44 #include "nvmap_priv.h"
45
46 #include <linux/list.h>
47
48 #define CACHE_MAINT_IMMEDIATE           0
49 #define CACHE_MAINT_ALLOW_DEFERRED      1
50
51 #define FLUSH_ALL_HANDLES               0
52
53 static ssize_t rw_handle(struct nvmap_client *client, struct nvmap_handle *h,
54                          int is_read, unsigned long h_offs,
55                          unsigned long sys_addr, unsigned long h_stride,
56                          unsigned long sys_stride, unsigned long elem_size,
57                          unsigned long count);
58
59 static int cache_maint(struct nvmap_client *client, struct nvmap_handle *h,
60                        unsigned long start, unsigned long end, unsigned int op,
61                        unsigned int allow_deferred);
62
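/*
 * Handle marshalling between ioctl id values and kernel handle pointers.
 * With CONFIG_COMPAT the 32-bit id is turned back into a kernel address by
 * OR-ing in PAGE_OFFSET; otherwise, when CONFIG_NVMAP_HANDLE_MARSHAL is
 * set, the pointer is obscured by XOR-ing it with NVMAP_XOR_HASH_MASK
 * (an involution, so the same operation unmarshals it).  Illustrative
 * round trip, not taken from this file:
 *
 *	ulong id = (ulong)marshal_kernel_handle((ulong)h);
 *	ulong kern = unmarshal_user_handle((struct nvmap_handle *)id);
 *	// kern == (ulong)h
 */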
63 #ifdef CONFIG_COMPAT
64 ulong unmarshal_user_handle(__u32 handle)
65 {
66         ulong h = (handle | PAGE_OFFSET);
67
68         return h;
69 }
70
71 __u32 marshal_kernel_handle(ulong handle)
72 {
73         return (__u32)handle;
74 }
75
76 ulong unmarshal_user_id(u32 id)
77 {
78         return unmarshal_user_handle(id);
79 }
80
81 #else
82 #define NVMAP_XOR_HASH_MASK 0xFFFFFFFC
83 ulong unmarshal_user_handle(struct nvmap_handle *handle)
84 {
85         if ((ulong)handle == 0)
86                 return (ulong)handle;
87
88 #ifdef CONFIG_NVMAP_HANDLE_MARSHAL
89         return (ulong)handle ^ NVMAP_XOR_HASH_MASK;
90 #else
91         return (ulong)handle;
92 #endif
93 }
94
95 struct nvmap_handle *marshal_kernel_handle(ulong handle)
96 {
97         if (handle == 0)
98                 return (struct nvmap_handle *)handle;
99
100 #ifdef CONFIG_NVMAP_HANDLE_MARSHAL
101         return (struct nvmap_handle *)(handle ^ NVMAP_XOR_HASH_MASK);
102 #else
103         return (struct nvmap_handle *)handle;
104 #endif
105 }
106
107 ulong unmarshal_user_id(ulong id)
108 {
109         return unmarshal_user_handle((struct nvmap_handle *)id);
110 }
111
112 #endif
113
114 ulong nvmap_ref_to_user_id(struct nvmap_handle_ref *ref)
115 {
116         if (!virt_addr_valid(ref))
117                 return 0;
118         return (ulong)marshal_kernel_handle(nvmap_ref_to_id(ref));
119 }
120
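/*
 * Pin/unpin ioctl handler.  A minimal user-space sketch, assuming the
 * NVMAP_IOC_PIN_MULT request and the struct layout from the nvmap uapi
 * header (the request name is illustrative, not verified against this
 * tree):
 *
 *	__u32 handles[2] = { h0, h1 };
 *	unsigned long addrs[2];
 *	struct nvmap_pin_handle op = {
 *		.handles = handles,
 *		.addr    = addrs,	/- filled with device addresses on pin -/
 *		.count   = 2,
 *	};
 *	ioctl(nvmap_fd, NVMAP_IOC_PIN_MULT, &op);
 */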
121 int nvmap_ioctl_pinop(struct file *filp, bool is_pin, void __user *arg)
122 {
123         struct nvmap_pin_handle op;
124         struct nvmap_handle *h;
125         unsigned long on_stack[16];
126         unsigned long *refs;
127         unsigned long __user *output;
128         unsigned int i;
129         int err = 0;
130
131         if (copy_from_user(&op, arg, sizeof(op)))
132                 return -EFAULT;
133
134         if (!op.count)
135                 return -EINVAL;
136
137         if (op.count > 1) {
138                 size_t bytes = op.count * sizeof(*refs); /* kcalloc below will catch overflow. */
139
140                 if (op.count > ARRAY_SIZE(on_stack))
141                         refs = kcalloc(op.count, sizeof(*refs), GFP_KERNEL);
142                 else
143                         refs = on_stack;
144
145                 if (!refs)
146                         return -ENOMEM;
147
148                 if (!access_ok(VERIFY_READ, op.handles, bytes)) {
149                         err = -EFAULT;
150                         goto out;
151                 }
152
153                 for (i = 0; i < op.count; i++) {
154                         typeof(*op.handles) handle;

                        /* op.handles points at user memory; fetch each
                         * entry with __get_user() rather than dereferencing
                         * the user pointer directly. */
                        if (__get_user(handle, &op.handles[i])) {
                                err = -EFAULT;
                                goto out;
                        }
                        refs[i] = unmarshal_user_handle(handle);
                }
155         } else {
156                 refs = on_stack;
157                 on_stack[0] = unmarshal_user_handle(
158                                 (typeof(*op.handles))op.handles);
159         }
160
161         trace_nvmap_ioctl_pinop(filp->private_data, is_pin, op.count, refs);
162         if (is_pin)
163                 err = nvmap_pin_ids(filp->private_data, op.count, refs);
164         else
165                 nvmap_unpin_ids(filp->private_data, op.count, refs);
166
167         /* skip the output stage on unpin */
168         if (err || !is_pin)
169                 goto out;
170
171         /* it is guaranteed that if nvmap_pin_ids returns 0, all of the
172          * handle_ref objects are valid, so dereferencing them directly
173          * here is safe */
174         if (op.count > 1)
175                 output = (unsigned long __user *)op.addr;
176         else {
177                 struct nvmap_pin_handle __user *tmp = arg;
178                 output = (unsigned long __user *)&(tmp->addr);
179         }
180
181         if (!output)
182                 goto out;
183
184         for (i = 0; i < op.count && !err; i++) {
185                 unsigned long addr;
186
187                 h = (struct nvmap_handle *)refs[i];
188
189                 if (h->heap_pgalloc && h->pgalloc.contig)
190                         addr = page_to_phys(h->pgalloc.pages[0]);
191                 else if (h->heap_pgalloc)
192                         addr = h->pgalloc.area->iovm_start;
193                 else
194                         addr = h->carveout->base;
195
196                 err = put_user(addr, &output[i]);
197         }
198
199         if (err)
200                 nvmap_unpin_ids(filp->private_data, op.count, refs);
201
202 out:
203         if (refs != on_stack)
204                 kfree(refs);
205
206         return err;
207 }
208
209 int nvmap_ioctl_getid(struct file *filp, void __user *arg)
210 {
211         struct nvmap_client *client = filp->private_data;
212         struct nvmap_create_handle op;
213         struct nvmap_handle *h = NULL;
214         ulong handle;
215
216         if (copy_from_user(&op, arg, sizeof(op)))
217                 return -EFAULT;
218
219         handle = unmarshal_user_handle(op.handle);
220         if (!handle)
221                 return -EINVAL;
222
223         h = nvmap_get_handle_id(client, handle);
224
225         if (!h)
226                 return -EPERM;
227
228         op.id = (__u32)marshal_kernel_handle((ulong)h);
229         if (client == h->owner)
230                 h->global = true;
231
232         nvmap_handle_put(h);
233
234         return copy_to_user(arg, &op, sizeof(op)) ? -EFAULT : 0;
235 }
236
237 static int nvmap_share_release(struct inode *inode, struct file *file)
238 {
239         struct nvmap_handle *h = file->private_data;
240
241         nvmap_handle_put(h);
242         return 0;
243 }
244
245 static int nvmap_share_mmap(struct file *file, struct vm_area_struct *vma)
246 {
247         /* unsupported operation */
248         WARN(1, "mmap() is not supported on an fd that shares an nvmap handle");
249         return -EPERM;
250 }
251
252 const struct file_operations nvmap_fd_fops = {
253         .owner          = THIS_MODULE,
254         .release        = nvmap_share_release,
255         .mmap           = nvmap_share_mmap,
256 };
257
258 int nvmap_ioctl_getfd(struct file *filp, void __user *arg)
259 {
260         struct nvmap_client *client = filp->private_data;
261         struct nvmap_create_handle op;
262         struct nvmap_handle *h = NULL;
263         int fd;
264         struct file *file;
265         ulong handle;
266
267         if (copy_from_user(&op, arg, sizeof(op)))
268                 return -EFAULT;
269
270         handle = unmarshal_user_handle(op.handle);
271         if (!handle)
272                 return -EINVAL;
273
274         h = nvmap_get_handle_id(client, handle);
275
276         if (!h)
277                 return -EPERM;
278
279         fd = get_unused_fd();
280         if (fd < 0)
281                 goto fail_fd;
282
283         file = anon_inode_getfile("nvmap_share_fd",
284                                     &nvmap_fd_fops, h, O_RDWR);
285         if (IS_ERR_OR_NULL(file))
286                 goto fail_file;
287         fd_install(fd, file);
288
289         op.fd = fd;
290         return copy_to_user(arg, &op, sizeof(op)) ? -EFAULT : 0;
291
292 fail_file:
293         put_unused_fd(fd);
294 fail_fd:
295         nvmap_handle_put(h);
296         return -ENFILE;
297 }
298
299 int nvmap_ioctl_alloc(struct file *filp, void __user *arg)
300 {
301         struct nvmap_alloc_handle op;
302         struct nvmap_client *client = filp->private_data;
303         ulong handle;
304
305         if (copy_from_user(&op, arg, sizeof(op)))
306                 return -EFAULT;
307
308         handle = unmarshal_user_handle(op.handle);
309         if (!handle)
310                 return -EINVAL;
311
312         if (op.align & (op.align - 1))
313                 return -EINVAL;
314
315         /* user-space handles are aligned to page boundaries, to prevent
316          * data leakage. */
317         op.align = max_t(size_t, op.align, PAGE_SIZE);
318 #if defined(CONFIG_NVMAP_FORCE_ZEROED_USER_PAGES)
319         op.flags |= NVMAP_HANDLE_ZEROED_PAGES;
320 #endif
321
322         return nvmap_alloc_handle_id(client, handle, op.heap_mask,
323                                      op.align,
324                                      0, /* no kind */
325                                      op.flags & (~NVMAP_HANDLE_KIND_SPECIFIED));
326 }
327
328 int nvmap_ioctl_alloc_kind(struct file *filp, void __user *arg)
329 {
330         struct nvmap_alloc_kind_handle op;
331         struct nvmap_client *client = filp->private_data;
332         ulong handle;
333
334         if (copy_from_user(&op, arg, sizeof(op)))
335                 return -EFAULT;
336
337         handle = unmarshal_user_handle(op.handle);
338         if (!handle)
339                 return -EINVAL;
340
341         if (op.align & (op.align - 1))
342                 return -EINVAL;
343
344         /* user-space handles are aligned to page boundaries, to prevent
345          * data leakage. */
346         op.align = max_t(size_t, op.align, PAGE_SIZE);
347 #if defined(CONFIG_NVMAP_FORCE_ZEROED_USER_PAGES)
348         op.flags |= NVMAP_HANDLE_ZEROED_PAGES;
349 #endif
350
351         return nvmap_alloc_handle_id(client, handle, op.heap_mask,
352                                      op.align,
353                                      op.kind,
354                                      op.flags);
355 }
356
357 int nvmap_ioctl_create(struct file *filp, unsigned int cmd, void __user *arg)
358 {
359         struct nvmap_create_handle op;
360         struct nvmap_handle_ref *ref = NULL;
361         struct nvmap_client *client = filp->private_data;
362         int err = 0;
363
364         if (copy_from_user(&op, arg, sizeof(op)))
365                 return -EFAULT;
366
367         if (!client)
368                 return -ENODEV;
369
370         if (cmd == NVMAP_IOC_CREATE) {
371                 ref = nvmap_create_handle(client, PAGE_ALIGN(op.size));
372                 if (!IS_ERR(ref))
373                         ref->handle->orig_size = op.size;
374         } else if (cmd == NVMAP_IOC_FROM_ID) {
375                 ref = nvmap_duplicate_handle_user_id(client, op.id);
376         } else if (cmd == NVMAP_IOC_FROM_FD) {
377                 ref = nvmap_create_handle_from_fd(client, op.fd);
378         } else {
379                 return -EINVAL;
380         }
381
382         if (IS_ERR(ref))
383                 return PTR_ERR(ref);
384
385         op.handle = marshal_kernel_handle(nvmap_ref_to_id(ref));
386         if (copy_to_user(arg, &op, sizeof(op))) {
387                 err = -EFAULT;
388                 nvmap_free_handle_id(client, nvmap_ref_to_id(ref));
389         }
390
391         return err;
392 }
393
394 int nvmap_map_into_caller_ptr(struct file *filp, void __user *arg)
395 {
396         struct nvmap_client *client = filp->private_data;
397         struct nvmap_map_caller op;
398         struct nvmap_vma_priv *vpriv;
399         struct vm_area_struct *vma;
400         struct nvmap_handle *h = NULL;
401         unsigned int cache_flags;
402         int err = 0;
403         ulong handle;
404
405         if (copy_from_user(&op, arg, sizeof(op)))
406                 return -EFAULT;
407
408         handle = unmarshal_user_handle(op.handle);
409
410         if (!handle)
411                 return -EINVAL;
412
413         h = nvmap_get_handle_id(client, handle);
414
415         if (!h)
416                 return -EPERM;
417
418         if (!h->alloc) {
419                 nvmap_handle_put(h);
420                 return -EFAULT;
421         }
422
423         trace_nvmap_map_into_caller_ptr(client, h, op.offset,
424                                         op.length, op.flags);
425         down_read(&current->mm->mmap_sem);
426
427         vma = find_vma(current->mm, op.addr);
428         if (!vma || !vma->vm_private_data) {
429                 err = -ENOMEM;
430                 goto out;
431         }
432
433         if (op.offset & ~PAGE_MASK) {
434                 err = -EFAULT;
435                 goto out;
436         }
437
438         if (op.offset >= h->size || op.length > h->size - op.offset) {
439                 err = -EADDRNOTAVAIL;
440                 goto out;
441         }
442
443         vpriv = vma->vm_private_data;
444         BUG_ON(!vpriv);
445
446         /* the VMA must exactly match the requested mapping operation, and the
447          * VMA that is targeted must have been created by this driver
448          */
449         if ((vma->vm_start != op.addr) || !is_nvmap_vma(vma) ||
450             (vma->vm_end - vma->vm_start != op.length)) {
451                 err = -EPERM;
452                 goto out;
453         }
454
455         /* verify that each mmap() system call creates a unique VMA */
456
457         if (vpriv->handle && (h == vpriv->handle)) {
458                 goto out;
459         } else if (vpriv->handle) {
460                 err = -EADDRNOTAVAIL;
461                 goto out;
462         }
463
464         if (!h->heap_pgalloc && (h->carveout->base & ~PAGE_MASK)) {
465                 err = -EFAULT;
466                 goto out;
467         }
468
469         vpriv->handle = h;
470         vpriv->offs = op.offset;
471
472         cache_flags = op.flags & NVMAP_HANDLE_CACHE_FLAG;
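        /* A caller may remap an uncacheable/write-combined buffer with
         * cacheable attributes.  If the buffer size is page aligned, the
         * handle's cache flags are upgraded and the kernel page attributes
         * of system-heap pages are updated to match. */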
473         if ((cache_flags == NVMAP_HANDLE_INNER_CACHEABLE ||
474              cache_flags == NVMAP_HANDLE_CACHEABLE) &&
475             (h->flags == NVMAP_HANDLE_UNCACHEABLE ||
476              h->flags == NVMAP_HANDLE_WRITE_COMBINE)) {
477                 if (h->size & ~PAGE_MASK) {
478                         pr_err("%s: request to convert an uc/wc buffer to"
479                                 " wb ignored: size is not a multiple of"
480                                 " the page size\n", __func__);
481                 } else {
482                         unsigned int nr_page = h->size >> PAGE_SHIFT;
483                         wmb();
484                         /* override allocation time cache coherency attributes. */
485                         h->flags &= ~NVMAP_HANDLE_CACHE_FLAG;
486                         h->flags |= cache_flags;
487
488                         /* Update page attributes, if the memory is allocated
489                          *  from system heap pages.
490                          */
491                         if (cache_flags == NVMAP_HANDLE_INNER_CACHEABLE &&
492                                 h->heap_pgalloc)
493                                 set_pages_array_iwb(h->pgalloc.pages, nr_page);
494                         else if (h->heap_pgalloc)
495                                 set_pages_array_wb(h->pgalloc.pages, nr_page);
496                 }
497         }
498         vma->vm_page_prot = nvmap_pgprot(h, vma->vm_page_prot);
499
500 out:
501         up_read(&current->mm->mmap_sem);
502
503         if (err)
504                 nvmap_handle_put(h);
505         return err;
506 }
507
508 int nvmap_ioctl_get_param(struct file *filp, void __user* arg)
509 {
510         struct nvmap_handle_param op;
511         struct nvmap_client *client = filp->private_data;
512         struct nvmap_handle_ref *ref;
513         struct nvmap_handle *h;
514         u64 result;
515         int err = 0;
516         ulong handle;
517
518         if (copy_from_user(&op, arg, sizeof(op)))
519                 return -EFAULT;
520
521         handle = unmarshal_user_handle(op.handle);
522         h = nvmap_get_handle_id(client, handle);
523         if (!h)
524                 return -EINVAL;
525
526         nvmap_ref_lock(client);
527         ref = _nvmap_validate_id_locked(client, handle);
528         if (IS_ERR_OR_NULL(ref)) {
529                 err = ref ? PTR_ERR(ref) : -EINVAL;
530                 goto ref_fail;
531         }
532
533         err = nvmap_get_handle_param(client, ref, op.param, &result);
534         op.result = (unsigned long)result;
535
536         if (!err && copy_to_user(arg, &op, sizeof(op)))
537                 err = -EFAULT;
538
539 ref_fail:
540         nvmap_ref_unlock(client);
541         nvmap_handle_put(h);
542         return err;
543 }
544
545 int nvmap_ioctl_rw_handle(struct file *filp, int is_read, void __user* arg)
546 {
547         struct nvmap_client *client = filp->private_data;
548         struct nvmap_rw_handle __user *uarg = arg;
549         struct nvmap_rw_handle op;
550         struct nvmap_handle *h;
551         ssize_t copied;
552         int err = 0;
553         ulong handle;
554
555         if (copy_from_user(&op, arg, sizeof(op)))
556                 return -EFAULT;
557
558         handle = unmarshal_user_handle(op.handle);
559         if (!handle || !op.addr || !op.count || !op.elem_size)
560                 return -EINVAL;
561
562         h = nvmap_get_handle_id(client, handle);
563         if (!h)
564                 return -EPERM;
565
566         trace_nvmap_ioctl_rw_handle(client, h, is_read, op.offset,
567                                     op.addr, op.hmem_stride,
568                                     op.user_stride, op.elem_size, op.count);
569         copied = rw_handle(client, h, is_read, op.offset,
570                            (unsigned long)op.addr, op.hmem_stride,
571                            op.user_stride, op.elem_size, op.count);
572
573         if (copied < 0) {
574                 err = copied;
575                 copied = 0;
576         } else if (copied < (op.count * op.elem_size)) {
577                 err = -EINTR;
578         }

        if (__put_user(copied, &uarg->count))
                err = -EFAULT;
580
581         nvmap_handle_put(h);
582
583         return err;
584 }
585
586 int nvmap_ioctl_cache_maint(struct file *filp, void __user *arg)
587 {
588         struct nvmap_client *client = filp->private_data;
589         struct nvmap_cache_op op;
590         struct vm_area_struct *vma;
591         struct nvmap_vma_priv *vpriv;
592         unsigned long start;
593         unsigned long end;
594         int err = 0;
595         ulong handle;
596
597         if (copy_from_user(&op, arg, sizeof(op)))
598                 return -EFAULT;
599
600         handle = unmarshal_user_handle(op.handle);
601         if (!handle || !op.addr || op.op < NVMAP_CACHE_OP_WB ||
602             op.op > NVMAP_CACHE_OP_WB_INV)
603                 return -EINVAL;
604
605         down_read(&current->mm->mmap_sem);
606
607         vma = find_vma(current->mm, (unsigned long)op.addr);
608         if (!vma || !is_nvmap_vma(vma) ||
609             (ulong)op.addr < vma->vm_start ||
610             (ulong)op.addr >= vma->vm_end ||
611             op.len > vma->vm_end - (ulong)op.addr) {
612                 err = -EADDRNOTAVAIL;
613                 goto out;
614         }
615
616         vpriv = (struct nvmap_vma_priv *)vma->vm_private_data;
617
618         if ((unsigned long)vpriv->handle != handle) {
619                 err = -EFAULT;
620                 goto out;
621         }
622
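        /* Translate the user virtual address into an offset within the
         * handle: distance from the start of the VMA plus the VMA's page
         * offset (vm_pgoff is in units of pages). */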
623         start = (unsigned long)op.addr - vma->vm_start +
624                 (vma->vm_pgoff << PAGE_SHIFT);
625         end = start + op.len;
626
627         err = cache_maint(client, vpriv->handle, start, end, op.op,
628                 CACHE_MAINT_ALLOW_DEFERRED);
629 out:
630         up_read(&current->mm->mmap_sem);
631         return err;
632 }
633
634 int nvmap_ioctl_free(struct file *filp, unsigned long arg)
635 {
636         struct nvmap_client *client = filp->private_data;
637
638         if (!arg)
639                 return 0;
640
641         nvmap_free_handle_user_id(client, arg);
642         return 0;
643 }
644
645 static void inner_cache_maint(unsigned int op, void *vaddr, size_t size)
646 {
647         if (op == NVMAP_CACHE_OP_WB_INV)
648                 dmac_flush_range(vaddr, vaddr + size);
649         else if (op == NVMAP_CACHE_OP_INV)
650                 dmac_map_area(vaddr, size, DMA_FROM_DEVICE);
651         else
652                 dmac_map_area(vaddr, size, DMA_TO_DEVICE);
653 }
654
655 static void outer_cache_maint(unsigned int op, phys_addr_t paddr, size_t size)
656 {
657 #ifndef CONFIG_ARM64
658         if (op == NVMAP_CACHE_OP_WB_INV)
659                 outer_flush_range(paddr, paddr + size);
660         else if (op == NVMAP_CACHE_OP_INV)
661                 outer_inv_range(paddr, paddr + size);
662         else
663                 outer_clean_range(paddr, paddr + size);
664 #endif
665 }
666
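/*
 * Per-page cache maintenance for handles backed by discrete pages: each
 * page is temporarily mapped at the scratch kernel address 'kaddr' through
 * *pte so the inner cache can be maintained by virtual address, while the
 * outer cache is maintained by physical address.
 */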
667 static void heap_page_cache_maint(
668         struct nvmap_handle *h, unsigned long start, unsigned long end,
669         unsigned int op, bool inner, bool outer, pte_t **pte,
670         unsigned long kaddr, pgprot_t prot)
671 {
672         struct page *page;
673         phys_addr_t paddr;
674         unsigned long next;
675         unsigned long off;
676         size_t size;
677
678         while (start < end) {
679                 page = h->pgalloc.pages[start >> PAGE_SHIFT];
680                 next = min(((start + PAGE_SIZE) & PAGE_MASK), end);
681                 off = start & ~PAGE_MASK;
682                 size = next - start;
683                 paddr = page_to_phys(page) + off;
684
685                 if (inner) {
686                         void *vaddr = (void *)kaddr + off;
687                         BUG_ON(!pte);
688                         BUG_ON(!kaddr);
689                         set_pte_at(&init_mm, kaddr, *pte,
690                                 pfn_pte(__phys_to_pfn(paddr), prot));
691                         nvmap_flush_tlb_kernel_page(kaddr);
692                         inner_cache_maint(op, vaddr, size);
693                 }
694
695                 if (outer)
696                         outer_cache_maint(op, paddr, size);
697                 start = next;
698         }
699 }
700
701 #if defined(CONFIG_NVMAP_OUTER_CACHE_MAINT_BY_SET_WAYS)
702 static bool fast_cache_maint_outer(unsigned long start,
703                 unsigned long end, unsigned int op)
704 {
705         bool result = false;
706         if (end - start >= cache_maint_outer_threshold) {
707                 if (op == NVMAP_CACHE_OP_WB_INV) {
708                         outer_flush_all();
709                         result = true;
710                 }
711                 if (op == NVMAP_CACHE_OP_WB) {
712                         outer_clean_all();
713                         result = true;
714                 }
715         }
716
717         return result;
718 }
719 #else
720 static inline bool fast_cache_maint_outer(unsigned long start,
721                 unsigned long end, unsigned int op)
722 {
723         return false;
724 }
725 #endif
726
727 #if defined(CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS)
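/*
 * For large ranges (>= cache_maint_inner_threshold) it is cheaper to clean
 * or flush the whole inner cache by set/ways than to walk the range line by
 * line.  Invalidate-only operations never take this path, since a full
 * invalidate would discard unrelated dirty lines.
 */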
728 static bool fast_cache_maint(struct nvmap_handle *h,
729         unsigned long start,
730         unsigned long end, unsigned int op)
731 {
732         if ((op == NVMAP_CACHE_OP_INV) ||
733                 ((end - start) < cache_maint_inner_threshold))
734                 return false;
735
736         if (op == NVMAP_CACHE_OP_WB_INV)
737                 inner_flush_cache_all();
738         else if (op == NVMAP_CACHE_OP_WB)
739                 inner_clean_cache_all();
740
741         /* outer maintenance */
742         if (h->flags != NVMAP_HANDLE_INNER_CACHEABLE) {
743                 if (!fast_cache_maint_outer(start, end, op)) {
745                         if (h->heap_pgalloc) {
746                                 heap_page_cache_maint(h, start,
747                                         end, op, false, true, NULL, 0, 0);
748                         } else  {
749                                 phys_addr_t pstart;
750
751                                 pstart = start + h->carveout->base;
752                                 outer_cache_maint(op, pstart, end - start);
753                         }
754                 }
755         }
756         return true;
757 }
758 #else
759 static inline bool fast_cache_maint(struct nvmap_handle *h,
760                                     unsigned long start, unsigned long end,
761                                     unsigned int op)
762 {
763         return false;
764 }
765 #endif
766
767 static void debug_count_requested_op(struct nvmap_deferred_ops *deferred_ops,
768                 unsigned long size, unsigned int flags)
769 {
770         unsigned long inner_flush_size = size;
771         unsigned long outer_flush_size = size;
772         (void) outer_flush_size;
773
774 #ifdef CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS
775         inner_flush_size = min(size, (unsigned long)
776                 cache_maint_inner_threshold);
777 #endif
778
779 #if defined(CONFIG_NVMAP_OUTER_CACHE_MAINT_BY_SET_WAYS)
780         outer_flush_size = min(size, (unsigned long)
781                 cache_maint_outer_threshold);
782 #endif
783
784         if (flags == NVMAP_HANDLE_INNER_CACHEABLE)
785                 deferred_ops->deferred_maint_inner_requested +=
786                                 inner_flush_size;
787
788         if (flags == NVMAP_HANDLE_CACHEABLE) {
789                 deferred_ops->deferred_maint_inner_requested +=
790                                 inner_flush_size;
791 #ifdef CONFIG_OUTER_CACHE
792                 deferred_ops->deferred_maint_outer_requested +=
793                                 outer_flush_size;
794 #endif /* CONFIG_OUTER_CACHE */
795         }
796 }
797
798 static void debug_count_flushed_op(struct nvmap_deferred_ops *deferred_ops,
799                 unsigned long size, unsigned int flags)
800 {
801         unsigned long inner_flush_size = size;
802         unsigned long outer_flush_size = size;
803         (void) outer_flush_size;
804
805 #ifdef CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS
806         inner_flush_size = min(size, (unsigned long)
807                 cache_maint_inner_threshold);
808 #endif
809
810 #if defined(CONFIG_NVMAP_OUTER_CACHE_MAINT_BY_SET_WAYS)
811         outer_flush_size = min(size, (unsigned long)
812                 cache_maint_outer_threshold);
813 #endif
814
815         if (flags == NVMAP_HANDLE_INNER_CACHEABLE)
816                 deferred_ops->deferred_maint_inner_flushed +=
817                                 inner_flush_size;
818
819         if (flags == NVMAP_HANDLE_CACHEABLE) {
820                 deferred_ops->deferred_maint_inner_flushed +=
821                                 inner_flush_size;
822 #ifdef CONFIG_OUTER_CACHE
823                 deferred_ops->deferred_maint_outer_flushed +=
824                                 outer_flush_size;
825 #endif /* CONFIG_OUTER_CACHE */
826         }
827 }
828
829 struct cache_maint_op {
830         struct list_head list_data;
831         phys_addr_t start;
832         phys_addr_t end;
833         unsigned int op;
834         struct nvmap_handle *h;
835         int error;
836         bool inner;
837         bool outer;
838 };
839
840 static void cache_maint_work_funct(struct cache_maint_op *cache_work)
841 {
842         pgprot_t prot;
843         pte_t **pte = NULL;
844         unsigned long kaddr;
845         phys_addr_t pstart = cache_work->start;
846         phys_addr_t pend = cache_work->end;
847         phys_addr_t loop;
848         struct nvmap_handle *h = cache_work->h;
849         struct nvmap_client *client;
850         unsigned int op = cache_work->op;
851
852         BUG_ON(!h);
853         client = h->owner;
854
855         h = nvmap_handle_get(h);
856         if (!h) {
857                 cache_work->error = -EFAULT;
858                 return;
859         }
860         if (!h->alloc) {
861                 cache_work->error = -EFAULT;
862                 goto out;
863         }
864
865         if (client)
866                 trace_cache_maint(client, h, pstart, pend, op);
867         wmb();
868         if (h->flags == NVMAP_HANDLE_UNCACHEABLE ||
869             h->flags == NVMAP_HANDLE_WRITE_COMBINE || pstart == pend)
870                 goto out;
871
872         if (fast_cache_maint(h, pstart, pend, op))
873                 goto out;
874
875         prot = nvmap_pgprot(h, pgprot_kernel);
876         pte = nvmap_alloc_pte(h->dev, (void **)&kaddr);
877         if (IS_ERR(pte)) {
878                 cache_work->error = PTR_ERR(pte);
879                 pte = NULL;
880                 goto out;
881         }
882
883         if (h->heap_pgalloc) {
884                 heap_page_cache_maint(h, pstart, pend, op, true,
885                         (h->flags == NVMAP_HANDLE_INNER_CACHEABLE) ?
886                                         false : true,
887                         pte, kaddr, prot);
888                 goto out;
889         }
890
891         if (pstart > h->size || pend > h->size) {
892                 pr_warn("cache maintenance outside handle\n");
893                 cache_work->error = -EINVAL;
894                 goto out;
895         }
896
897         pstart += h->carveout->base;
898         pend += h->carveout->base;
899
900         loop = pstart;
901
902         while (loop < pend) {
903                 phys_addr_t next = (loop + PAGE_SIZE) & PAGE_MASK;
904                 void *base = (void *)kaddr + (loop & ~PAGE_MASK);
905                 next = min(next, pend);
906
907                 set_pte_at(&init_mm, kaddr, *pte,
908                            pfn_pte(__phys_to_pfn(loop), prot));
909                 nvmap_flush_tlb_kernel_page(kaddr);
910
911                 inner_cache_maint(op, base, next - loop);
912                 loop = next;
913         }
914
915         if (h->flags != NVMAP_HANDLE_INNER_CACHEABLE)
916                 outer_cache_maint(op, pstart, pend - pstart);
917
918 out:
919         if (pte)
920                 nvmap_free_pte(h->dev, pte);
921         nvmap_handle_put(h);
922         return;
923 }
924
925 int nvmap_find_cache_maint_op(struct nvmap_device *dev,
926                 struct nvmap_handle *h)
{
927         struct nvmap_deferred_ops *deferred_ops =
928                         nvmap_get_deferred_ops_from_dev(dev);
929         struct cache_maint_op *cache_op = NULL;
930         spin_lock(&deferred_ops->deferred_ops_lock);
931         list_for_each_entry(cache_op, &deferred_ops->ops_list, list_data) {
932                 if (cache_op->h == h) {
933                         spin_unlock(&deferred_ops->deferred_ops_lock);
934                         return true;
935                 }
936         }
937         spin_unlock(&deferred_ops->deferred_ops_lock);
938         return false;
939 }
940
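/*
 * Flush deferred cache-maintenance operations.  When flushing by set/ways
 * is enabled, large accumulated write-back+invalidate ranges are collected
 * under the lock and then satisfied with a single full inner (and outer)
 * cache flush outside the lock; any remaining ops (all of them, or only
 * those for handle 'h' when it is non-NULL) are performed individually
 * afterwards.
 */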
941 void nvmap_cache_maint_ops_flush(struct nvmap_device *dev,
942                 struct nvmap_handle *h)
{
943
944         struct nvmap_deferred_ops *deferred_ops =
945                 nvmap_get_deferred_ops_from_dev(dev);
946
947         struct cache_maint_op *cache_op = NULL;
948         struct cache_maint_op *temp = NULL;
949
950         size_t flush_size_outer_inner = 0;
951         size_t flush_size_inner = 0;
952 #ifdef CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS
953         bool allow_outer_flush_by_ways;
954 #endif
955         struct list_head flushed_ops;
956
957         (void) flush_size_outer_inner;
958         (void) flush_size_inner;
959         INIT_LIST_HEAD(&flushed_ops);
960
961 #ifdef CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS
962         /* Go through the deferred ops and check whether a full L1/L2 flush
963          * can be done instead.  Only list manipulation is done under the
964          * lock; the actual maintenance must not block list operations. */
965         spin_lock(&deferred_ops->deferred_ops_lock);
966
967 #ifdef CONFIG_NVMAP_OUTER_CACHE_MAINT_BY_SET_WAYS
968         allow_outer_flush_by_ways =
969                         cache_maint_outer_threshold >
970                                 cache_maint_inner_threshold;
971 #else
972         allow_outer_flush_by_ways = false;
973 #endif
974
975         if (list_empty(&deferred_ops->ops_list)) {
976                 spin_unlock(&deferred_ops->deferred_ops_lock);
977                 return;
978         }
979
980         /* count sum of inner and outer flush ranges */
981         list_for_each_entry(cache_op, &deferred_ops->ops_list, list_data) {
982                 if (cache_op->op == NVMAP_CACHE_OP_WB_INV) {
983                         unsigned long range =
984                                         cache_op->end - cache_op->start;
985                         if (allow_outer_flush_by_ways &&
986                                 cache_op->outer && cache_op->inner)
987                                 flush_size_outer_inner += range;
988                         else
989                         if (cache_op->inner && !cache_op->outer)
990                                 flush_size_inner += range;
991                 }
992         }
993         /* collect all flush operations */
994         if (flush_size_outer_inner > cache_maint_outer_threshold) {
995                 list_for_each_entry_safe(cache_op, temp,
996                                         &deferred_ops->ops_list, list_data) {
997                         if (cache_op->op == NVMAP_CACHE_OP_WB_INV &&
998                                         (cache_op->outer && cache_op->inner))
999                                 list_move(&cache_op->list_data, &flushed_ops);
1000                 }
1001                 debug_count_flushed_op(deferred_ops,
1002                                 cache_maint_outer_threshold,
1003                                 NVMAP_HANDLE_CACHEABLE);
1004                 debug_count_flushed_op(deferred_ops,
1005                                 cache_maint_inner_threshold,
1006                                 NVMAP_HANDLE_INNER_CACHEABLE);
1007         } else if (flush_size_inner > cache_maint_inner_threshold) {
1008                 list_for_each_entry_safe(cache_op, temp,
1009                                 &deferred_ops->ops_list, list_data) {
1010                         if (cache_op->op == NVMAP_CACHE_OP_WB_INV &&
1011                                         (cache_op->inner && !cache_op->outer))
1012                                 list_move(&cache_op->list_data, &flushed_ops);
1013                 }
1014                 debug_count_flushed_op(deferred_ops,
1015                                 cache_maint_inner_threshold,
1016                                 NVMAP_HANDLE_INNER_CACHEABLE);
1017         }
1018         spin_unlock(&deferred_ops->deferred_ops_lock);
1019
1020         /* do actual maintenance outside spinlock */
1021         if (flush_size_outer_inner > cache_maint_outer_threshold) {
1022                 inner_flush_cache_all();
1023                 outer_flush_all();
1024                 /* cleanup finished ops */
1025                 list_for_each_entry_safe(cache_op, temp,
1026                                 &flushed_ops, list_data) {
1027                         list_del(&cache_op->list_data);
1028                         nvmap_handle_put(cache_op->h);
1029                         kfree(cache_op);
1030                 }
1031         } else if (flush_size_inner > cache_maint_inner_threshold) {
1032                 /* Flush only inner-cached entries */
1033                 inner_flush_cache_all();
1034                 /* cleanup finished ops */
1035                 list_for_each_entry_safe(cache_op, temp,
1036                                 &flushed_ops, list_data) {
1037                         list_del(&cache_op->list_data);
1038                         nvmap_handle_put(cache_op->h);
1039                         kfree(cache_op);
1040                 }
1041         }
1042 #endif
1043         /* Flush other handles (all or only requested) */
1044         spin_lock(&deferred_ops->deferred_ops_lock);
1045         list_for_each_entry_safe(cache_op, temp,
1046                         &deferred_ops->ops_list, list_data) {
1047                 if (!h || cache_op->h == h)
1048                         list_move(&cache_op->list_data, &flushed_ops);
1049         }
1050         spin_unlock(&deferred_ops->deferred_ops_lock);
1051
1052         list_for_each_entry_safe(cache_op, temp,
1053                         &flushed_ops, list_data) {
1054
1055                 cache_maint_work_funct(cache_op);
1056
1057                 if (cache_op->op == NVMAP_CACHE_OP_WB_INV)
1058                         debug_count_flushed_op(deferred_ops,
1059                                 cache_op->end - cache_op->start,
1060                                 cache_op->h->flags);
1061
1062                 list_del(&cache_op->list_data);
1063                 nvmap_handle_put(cache_op->h);
1064                 kfree(cache_op);
1065         }
1066 }
1067
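/*
 * Perform (or queue) cache maintenance on a handle range.  A write-back+
 * invalidate on an unpinned, cacheable handle may be deferred to the
 * per-device deferred-ops list when the caller allows it and deferred
 * maintenance is enabled; everything else is carried out immediately via
 * cache_maint_work_funct().
 */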
1068 static int cache_maint(struct nvmap_client *client,
1069                         struct nvmap_handle *h,
1070                         unsigned long start, unsigned long end,
1071                         unsigned int op, unsigned int allow_deferred)
1072 {
1073         int err = 0;
1074         struct nvmap_deferred_ops *deferred_ops =
1075                 nvmap_get_deferred_ops_from_dev(client->dev);
1076         bool inner_maint = false;
1077         bool outer_maint = false;
1078
1079         h = nvmap_handle_get(h);
1080         if (!h)
1081                 return -EFAULT;
1082
1083         /* count requested flush ops */
1084         if (op == NVMAP_CACHE_OP_WB_INV) {
1085                 spin_lock(&deferred_ops->deferred_ops_lock);
1086                 debug_count_requested_op(deferred_ops,
1087                                 end - start, h->flags);
1088                 spin_unlock(&deferred_ops->deferred_ops_lock);
1089         }
1090
1091         inner_maint = h->flags == NVMAP_HANDLE_CACHEABLE ||
1092                         h->flags == NVMAP_HANDLE_INNER_CACHEABLE;
1093
1094 #ifdef CONFIG_OUTER_CACHE
1095         outer_maint = h->flags == NVMAP_HANDLE_CACHEABLE;
1096 #endif
1097
1098         /* Finish deferred maintenance for the handle before invalidating */
1099         if (op == NVMAP_CACHE_OP_INV &&
1100                         nvmap_find_cache_maint_op(h->dev, h)) {
1101                 struct nvmap_share *share = nvmap_get_share_from_dev(h->dev);
1102                 mutex_lock(&share->pin_lock);
1103                 nvmap_cache_maint_ops_flush(h->dev, h);
1104                 mutex_unlock(&share->pin_lock);
1105         }
1106
1107         if (op == NVMAP_CACHE_OP_WB_INV &&
1108                         (inner_maint || outer_maint) &&
1109                         allow_deferred == CACHE_MAINT_ALLOW_DEFERRED &&
1110                         atomic_read(&h->pin) == 0 &&
1111                         !h->nvhost_priv &&
1112                         deferred_ops->enable_deferred_cache_maintenance) {
1113
1114                 struct cache_maint_op *cache_op;
1115
1116                 cache_op = kmalloc(sizeof(*cache_op), GFP_KERNEL);
1117                 if (!cache_op) {
1118                         nvmap_handle_put(h);
                        return -ENOMEM;
                }
1119                 cache_op->h = h;
1120                 cache_op->start = start;
1121                 cache_op->end = end;
1122                 cache_op->op = op;
1123                 cache_op->inner = inner_maint;
1124                 cache_op->outer = outer_maint;
1125
1126                 spin_lock(&deferred_ops->deferred_ops_lock);
1127                 list_add_tail(&cache_op->list_data,
1128                               &deferred_ops->ops_list);
1129                 spin_unlock(&deferred_ops->deferred_ops_lock);
1130         } else {
1131                 struct cache_maint_op cache_op;
1132
1133                 cache_op.h = h;
1134                 cache_op.start = start;
1135                 cache_op.end = end;
1136                 cache_op.op = op;
1137                 cache_op.inner = inner_maint;
1138                 cache_op.outer = outer_maint;
1139
1140                 cache_maint_work_funct(&cache_op);
1141
1142                 if (op == NVMAP_CACHE_OP_WB_INV) {
1143                         spin_lock(&deferred_ops->deferred_ops_lock);
1144                         debug_count_flushed_op(deferred_ops,
1145                                 end - start, h->flags);
1146                         spin_unlock(&deferred_ops->deferred_ops_lock);
1147                 }
1148
1149                 err = cache_op.error;
1150                 nvmap_handle_put(h);
1151         }
1152         return err;
1153 }
1154
1155 static int rw_handle_page(struct nvmap_handle *h, int is_read,
1156                           unsigned long start, unsigned long rw_addr,
1157                           unsigned long bytes, unsigned long kaddr, pte_t *pte)
1158 {
1159         pgprot_t prot = nvmap_pgprot(h, pgprot_kernel);
1160         unsigned long end = start + bytes;
1161         int err = 0;
1162
1163         while (!err && start < end) {
1164                 struct page *page = NULL;
1165                 phys_addr_t phys;
1166                 size_t count;
1167                 void *src;
1168
1169                 if (!h->heap_pgalloc) {
1170                         phys = h->carveout->base + start;
1171                 } else {
1172                         page = h->pgalloc.pages[start >> PAGE_SHIFT];
1173                         BUG_ON(!page);
1174                         get_page(page);
1175                         phys = page_to_phys(page) + (start & ~PAGE_MASK);
1176                 }
1177
1178                 set_pte_at(&init_mm, kaddr, pte,
1179                            pfn_pte(__phys_to_pfn(phys), prot));
1180                 nvmap_flush_tlb_kernel_page(kaddr);
1181
1182                 src = (void *)kaddr + (phys & ~PAGE_MASK);
1183                 phys = PAGE_SIZE - (phys & ~PAGE_MASK);
1184                 count = min_t(size_t, end - start, phys);
1185
1186                 if (is_read)
1187                         err = copy_to_user((void __user *)rw_addr, src, count);
1188                 else
1189                         err = copy_from_user(src, (void __user *)rw_addr, count);
1190
1191                 if (err)
1192                         err = -EFAULT;
1193
1194                 rw_addr += count;
1195                 start += count;
1196
1197                 if (page)
1198                         put_page(page);
1199         }
1200
1201         return err;
1202 }
1203
1204 static ssize_t rw_handle(struct nvmap_client *client, struct nvmap_handle *h,
1205                          int is_read, unsigned long h_offs,
1206                          unsigned long sys_addr, unsigned long h_stride,
1207                          unsigned long sys_stride, unsigned long elem_size,
1208                          unsigned long count)
1209 {
1210         ssize_t copied = 0;
1211         pte_t **pte;
1212         void *addr;
1213         int ret = 0;
1214
1215         if (!elem_size)
1216                 return -EINVAL;
1217
1218         if (!h->alloc)
1219                 return -EFAULT;
1220
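
        /* Contiguous fast path: when the element size equals both strides,
         * the data is one dense block in the handle and in user memory, so
         * it can be copied as a single element of elem_size * count bytes. */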
1221         if (elem_size == h_stride && elem_size == sys_stride) {
1222                 elem_size *= count;
1223                 h_stride = elem_size;
1224                 sys_stride = elem_size;
1225                 count = 1;
1226         }
1227
1228         pte = nvmap_alloc_pte(client->dev, &addr);
1229         if (IS_ERR(pte))
1230                 return PTR_ERR(pte);
1231
1232         while (count--) {
1233                 if (h_offs + elem_size > h->size) {
1234                         nvmap_warn(client, "read/write outside of handle\n");
1235                         ret = -EFAULT;
1236                         break;
1237                 }
1238                 if (is_read)
1239                         cache_maint(client, h, h_offs,
1240                                 h_offs + elem_size, NVMAP_CACHE_OP_INV,
1241                                 CACHE_MAINT_IMMEDIATE);
1242
1243                 ret = rw_handle_page(h, is_read, h_offs, sys_addr,
1244                                      elem_size, (unsigned long)addr, *pte);
1245
1246                 if (ret)
1247                         break;
1248
1249                 if (!is_read)
1250                         cache_maint(client, h, h_offs,
1251                                 h_offs + elem_size, NVMAP_CACHE_OP_WB_INV,
1252                                 CACHE_MAINT_IMMEDIATE);
1253
1254                 copied += elem_size;
1255                 sys_addr += sys_stride;
1256                 h_offs += h_stride;
1257         }
1258
1259         nvmap_free_pte(client->dev, pte);
1260         return ret ?: copied;
1261 }