/*
 * drivers/video/tegra/nvmap/nvmap_ioctl.c
 *
 * User-space interface to nvmap
 *
 * Copyright (c) 2011-2012, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/nvmap.h>

#include <asm/cacheflush.h>
#include <asm/outercache.h>
#include <asm/tlbflush.h>

#include <mach/iovmm.h>
#include <trace/events/nvmap.h>

#include "nvmap_ioctl.h"
#include "nvmap.h"
#include "nvmap_common.h"

static ssize_t rw_handle(struct nvmap_client *client, struct nvmap_handle *h,
                         int is_read, unsigned long h_offs,
                         unsigned long sys_addr, unsigned long h_stride,
                         unsigned long sys_stride, unsigned long elem_size,
                         unsigned long count);

static int cache_maint(struct nvmap_client *client, struct nvmap_handle *h,
                       unsigned long start, unsigned long end, unsigned int op);


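/*
 * Pin or unpin the handles described by the nvmap_pin_handle argument.
 * For multi-handle requests the ID list is copied in from user space
 * (small lists use an on-stack buffer); on a successful pin, the
 * resulting device-visible addresses are written back to user space.
 */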
int nvmap_ioctl_pinop(struct file *filp, bool is_pin, void __user *arg)
{
        struct nvmap_pin_handle op;
        struct nvmap_handle *h;
        unsigned long on_stack[16];
        unsigned long *refs;
        unsigned long __user *output;
        unsigned int i;
        int err = 0;

        if (copy_from_user(&op, arg, sizeof(op)))
                return -EFAULT;

        if (!op.count)
                return -EINVAL;

        if (op.count > 1) {
                size_t bytes = op.count * sizeof(*refs); /* kcalloc below will catch overflow. */

                if (op.count > ARRAY_SIZE(on_stack))
                        refs = kcalloc(op.count, sizeof(*refs), GFP_KERNEL);
                else
                        refs = on_stack;

                if (!refs)
                        return -ENOMEM;

                if (copy_from_user(refs, (void *)op.handles, bytes)) {
                        err = -EFAULT;
                        goto out;
                }
        } else {
                refs = on_stack;
                on_stack[0] = (unsigned long)op.handles;
        }

        trace_nvmap_ioctl_pinop(filp->private_data, is_pin, op.count, refs);
        if (is_pin)
                err = nvmap_pin_ids(filp->private_data, op.count, refs);
        else
                nvmap_unpin_ids(filp->private_data, op.count, refs);

        /* skip the output stage on unpin */
        if (err || !is_pin)
                goto out;

        /* if nvmap_pin_ids() returned 0, all of the handle_ref objects are
         * guaranteed to be valid, so dereferencing them directly here is
         * safe */
        if (op.count > 1)
                output = (unsigned long __user *)op.addr;
        else {
                struct nvmap_pin_handle __user *tmp = arg;
                output = (unsigned long __user *)&(tmp->addr);
        }

        if (!output)
                goto out;

        for (i = 0; i < op.count && !err; i++) {
                unsigned long addr;

                h = (struct nvmap_handle *)refs[i];

                if (h->heap_pgalloc && h->pgalloc.contig)
                        addr = page_to_phys(h->pgalloc.pages[0]);
                else if (h->heap_pgalloc)
                        addr = h->pgalloc.area->iovm_start;
                else
                        addr = h->carveout->base;

                err = put_user(addr, &output[i]);
        }

        if (err)
                nvmap_unpin_ids(filp->private_data, op.count, refs);

out:
        if (refs != on_stack)
                kfree(refs);

        return err;
}

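/*
 * Return a global ID for one of the client's handles so it can be
 * shared with another client and re-imported via NVMAP_IOC_FROM_ID.
 * The handle is marked global only when the caller is its owner.
 */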
int nvmap_ioctl_getid(struct file *filp, void __user *arg)
{
        struct nvmap_client *client = filp->private_data;
        struct nvmap_create_handle op;
        struct nvmap_handle *h = NULL;

        if (copy_from_user(&op, arg, sizeof(op)))
                return -EFAULT;

        if (!op.handle)
                return -EINVAL;

        h = nvmap_get_handle_id(client, op.handle);

        if (!h)
                return -EPERM;

        op.id = (__u32)h;
        if (client == h->owner)
                h->global = true;

        nvmap_handle_put(h);

        return copy_to_user(arg, &op, sizeof(op)) ? -EFAULT : 0;
}

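/*
 * Allocate backing memory for an already-created handle. The requested
 * alignment must be a power of two; it is raised to at least PAGE_SIZE
 * so user mappings never expose adjacent data.
 */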
int nvmap_ioctl_alloc(struct file *filp, void __user *arg)
{
        struct nvmap_alloc_handle op;
        struct nvmap_client *client = filp->private_data;

        if (copy_from_user(&op, arg, sizeof(op)))
                return -EFAULT;

        if (!op.handle)
                return -EINVAL;

        if (op.align & (op.align - 1))
                return -EINVAL;

        /* user-space handles are aligned to page boundaries, to prevent
         * data leakage. */
        op.align = max_t(size_t, op.align, PAGE_SIZE);
#if defined(CONFIG_NVMAP_FORCE_ZEROED_USER_PAGES)
        op.flags |= NVMAP_HANDLE_ZEROED_PAGES;
#endif

        return nvmap_alloc_handle_id(client, op.handle, op.heap_mask,
                                     op.align, op.flags);
}

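/*
 * Create a new handle (NVMAP_IOC_CREATE) or duplicate one from a global
 * ID (NVMAP_IOC_FROM_ID) and return the resulting handle to user space.
 */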
int nvmap_ioctl_create(struct file *filp, unsigned int cmd, void __user *arg)
{
        struct nvmap_create_handle op;
        struct nvmap_handle_ref *ref = NULL;
        struct nvmap_client *client = filp->private_data;
        int err = 0;

        if (copy_from_user(&op, arg, sizeof(op)))
                return -EFAULT;

        if (!client)
                return -ENODEV;

        if (cmd == NVMAP_IOC_CREATE) {
                ref = nvmap_create_handle(client, PAGE_ALIGN(op.size));
                if (!IS_ERR(ref))
                        ref->handle->orig_size = op.size;
        } else if (cmd == NVMAP_IOC_FROM_ID) {
                ref = nvmap_duplicate_handle_id(client, op.id);
        } else {
                return -EINVAL;
        }

        if (IS_ERR(ref))
                return PTR_ERR(ref);

        op.handle = nvmap_ref_to_id(ref);
        if (copy_to_user(arg, &op, sizeof(op))) {
                err = -EFAULT;
                nvmap_free_handle_id(client, op.handle);
        }

        return err;
}

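/*
 * Bind a handle to a VMA previously created by mmap()ing the nvmap
 * device. The VMA must exactly cover the requested range, must belong
 * to this driver, and must not already be bound to a different handle.
 * Cacheability of uncached/write-combined handles may be upgraded here
 * when the whole buffer is page-aligned.
 */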
int nvmap_map_into_caller_ptr(struct file *filp, void __user *arg)
{
        struct nvmap_client *client = filp->private_data;
        struct nvmap_map_caller op;
        struct nvmap_vma_priv *vpriv;
        struct vm_area_struct *vma;
        struct nvmap_handle *h = NULL;
        unsigned int cache_flags;
        int err = 0;

        if (copy_from_user(&op, arg, sizeof(op)))
                return -EFAULT;

        if (!op.handle)
                return -EINVAL;

        h = nvmap_get_handle_id(client, op.handle);

        if (!h)
                return -EPERM;

        trace_nvmap_map_into_caller_ptr(client, h, op.offset,
                                        op.length, op.flags);
        down_read(&current->mm->mmap_sem);

        vma = find_vma(current->mm, op.addr);
        if (!vma || !vma->vm_private_data) {
                err = -ENOMEM;
                goto out;
        }

        if (op.offset & ~PAGE_MASK) {
                err = -EFAULT;
                goto out;
        }

        if (op.offset > h->size || (op.offset + op.length) > h->size) {
                err = -EADDRNOTAVAIL;
                goto out;
        }

        vpriv = vma->vm_private_data;
        BUG_ON(!vpriv);

        /* the VMA must exactly match the requested mapping operation, and the
         * VMA that is targeted must have been created by this driver
         */
        if ((vma->vm_start != op.addr) || !is_nvmap_vma(vma) ||
            (vma->vm_end-vma->vm_start != op.length)) {
                err = -EPERM;
                goto out;
        }

        /* verify that each mmap() system call creates a unique VMA */

        if (vpriv->handle && (h == vpriv->handle)) {
                goto out;
        } else if (vpriv->handle) {
                err = -EADDRNOTAVAIL;
                goto out;
        }

        nvmap_usecount_inc(h);

        if (!h->heap_pgalloc && (h->carveout->base & ~PAGE_MASK)) {
                nvmap_usecount_dec(h);
                err = -EFAULT;
                goto out;
        }

        vpriv->handle = h;
        vpriv->offs = op.offset;

        cache_flags = op.flags & NVMAP_HANDLE_CACHE_FLAG;
        if ((cache_flags == NVMAP_HANDLE_INNER_CACHEABLE ||
             cache_flags == NVMAP_HANDLE_CACHEABLE) &&
            (h->flags == NVMAP_HANDLE_UNCACHEABLE ||
             h->flags == NVMAP_HANDLE_WRITE_COMBINE)) {
                if (h->size & ~PAGE_MASK) {
                        pr_err("\n%s: attempt to convert a buffer from uc/wc to"
                                " wb, whose size is not a multiple of page size."
                                " request ignored.\n", __func__);
                } else {
                        unsigned int nr_page = h->size >> PAGE_SHIFT;
                        wmb();
                        /* override allocation time cache coherency attributes. */
                        h->flags &= ~NVMAP_HANDLE_CACHE_FLAG;
                        h->flags |= cache_flags;

                        /* Update page attributes, if the memory is allocated
                         * from system heap pages.
                         */
                        if (cache_flags == NVMAP_HANDLE_INNER_CACHEABLE &&
                                h->heap_pgalloc)
                                set_pages_array_iwb(h->pgalloc.pages, nr_page);
                        else if (h->heap_pgalloc)
                                set_pages_array_wb(h->pgalloc.pages, nr_page);
                }
        }
        vma->vm_page_prot = nvmap_pgprot(h, vma->vm_page_prot);

out:
        up_read(&current->mm->mmap_sem);

        if (err)
                nvmap_handle_put(h);
        return err;
}

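/*
 * Report a single handle attribute (size, alignment, base address or
 * heap) back to user space.
 */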
int nvmap_ioctl_get_param(struct file *filp, void __user *arg)
{
        struct nvmap_handle_param op;
        struct nvmap_client *client = filp->private_data;
        struct nvmap_handle *h;
        int err = 0;

        if (copy_from_user(&op, arg, sizeof(op)))
                return -EFAULT;

        h = nvmap_get_handle_id(client, op.handle);
        if (!h)
                return -EINVAL;

        switch (op.param) {
        case NVMAP_HANDLE_PARAM_SIZE:
                op.result = h->orig_size;
                break;
        case NVMAP_HANDLE_PARAM_ALIGNMENT:
                mutex_lock(&h->lock);
                if (!h->alloc)
                        op.result = 0;
                else if (h->heap_pgalloc)
                        op.result = PAGE_SIZE;
                else if (h->carveout->base)
                        op.result = (h->carveout->base & -h->carveout->base);
                else
                        op.result = SZ_4M;
                mutex_unlock(&h->lock);
                break;
        case NVMAP_HANDLE_PARAM_BASE:
                if (WARN_ON(!h->alloc || !atomic_add_return(0, &h->pin)))
                        op.result = -1ul;
                else if (!h->heap_pgalloc) {
                        mutex_lock(&h->lock);
                        op.result = h->carveout->base;
                        mutex_unlock(&h->lock);
                } else if (h->pgalloc.contig)
                        op.result = page_to_phys(h->pgalloc.pages[0]);
                else if (h->pgalloc.area)
                        op.result = h->pgalloc.area->iovm_start;
                else
                        op.result = -1ul;
                break;
        case NVMAP_HANDLE_PARAM_HEAP:
                if (!h->alloc)
                        op.result = 0;
                else if (!h->heap_pgalloc) {
                        mutex_lock(&h->lock);
                        op.result = nvmap_carveout_usage(client, h->carveout);
                        mutex_unlock(&h->lock);
                } else if (h->pgalloc.contig)
                        op.result = NVMAP_HEAP_SYSMEM;
                else
                        op.result = NVMAP_HEAP_IOVMM;
                break;
        default:
                err = -EINVAL;
                break;
        }

        if (!err && copy_to_user(arg, &op, sizeof(op)))
                err = -EFAULT;

        nvmap_handle_put(h);
        return err;
}

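/*
 * Strided copy between user memory and a handle's backing storage. The
 * number of bytes actually copied is written back into the user's
 * nvmap_rw_handle structure.
 */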
int nvmap_ioctl_rw_handle(struct file *filp, int is_read, void __user *arg)
{
        struct nvmap_client *client = filp->private_data;
        struct nvmap_rw_handle __user *uarg = arg;
        struct nvmap_rw_handle op;
        struct nvmap_handle *h;
        ssize_t copied;
        int err = 0;

        if (copy_from_user(&op, arg, sizeof(op)))
                return -EFAULT;

        if (!op.handle || !op.addr || !op.count || !op.elem_size)
                return -EINVAL;

        h = nvmap_get_handle_id(client, op.handle);
        if (!h)
                return -EPERM;

        nvmap_usecount_inc(h);

        trace_nvmap_ioctl_rw_handle(client, h, is_read, op.offset,
                                    op.addr, op.hmem_stride,
                                    op.user_stride, op.elem_size, op.count);
        copied = rw_handle(client, h, is_read, op.offset,
                           (unsigned long)op.addr, op.hmem_stride,
                           op.user_stride, op.elem_size, op.count);

        if (copied < 0) {
                err = copied;
                copied = 0;
        } else if (copied < (op.count * op.elem_size))
                err = -EINTR;

        __put_user(copied, &uarg->count);

        nvmap_usecount_dec(h);

        nvmap_handle_put(h);

        return err;
}

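/*
 * Cache maintenance (writeback, invalidate or both) on a range of a
 * handle that is currently mapped into the calling process.
 */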
int nvmap_ioctl_cache_maint(struct file *filp, void __user *arg)
{
        struct nvmap_client *client = filp->private_data;
        struct nvmap_cache_op op;
        struct vm_area_struct *vma;
        struct nvmap_vma_priv *vpriv;
        unsigned long start;
        unsigned long end;
        int err = 0;

        if (copy_from_user(&op, arg, sizeof(op)))
                return -EFAULT;

        if (!op.handle || !op.addr || op.op < NVMAP_CACHE_OP_WB ||
            op.op > NVMAP_CACHE_OP_WB_INV)
                return -EINVAL;

        down_read(&current->mm->mmap_sem);

        vma = find_vma(current->active_mm, (unsigned long)op.addr);
        if (!vma || !is_nvmap_vma(vma) ||
            (unsigned long)op.addr + op.len > vma->vm_end) {
                err = -EADDRNOTAVAIL;
                goto out;
        }

        vpriv = (struct nvmap_vma_priv *)vma->vm_private_data;

        if ((unsigned long)vpriv->handle != op.handle) {
                err = -EFAULT;
                goto out;
        }

        start = (unsigned long)op.addr - vma->vm_start;
        end = start + op.len;

        err = cache_maint(client, vpriv->handle, start, end, op.op);
out:
        up_read(&current->mm->mmap_sem);
        return err;
}

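/* Drop the client's reference to a handle; a zero ID is a no-op. */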
int nvmap_ioctl_free(struct file *filp, unsigned long arg)
{
        struct nvmap_client *client = filp->private_data;

        if (!arg)
                return 0;

        nvmap_free_handle_id(client, arg);
        return 0;
}

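/*
 * Inner (CPU) cache maintenance on a kernel virtual range; the matching
 * outer_cache_maint() below operates on the outer cache by physical
 * address.
 */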
static void inner_cache_maint(unsigned int op, void *vaddr, size_t size)
{
        if (op == NVMAP_CACHE_OP_WB_INV)
                dmac_flush_range(vaddr, vaddr + size);
        else if (op == NVMAP_CACHE_OP_INV)
                dmac_map_area(vaddr, size, DMA_FROM_DEVICE);
        else
                dmac_map_area(vaddr, size, DMA_TO_DEVICE);
}

static void outer_cache_maint(unsigned int op, unsigned long paddr, size_t size)
{
        if (op == NVMAP_CACHE_OP_WB_INV)
                outer_flush_range(paddr, paddr + size);
        else if (op == NVMAP_CACHE_OP_INV)
                outer_inv_range(paddr, paddr + size);
        else
                outer_clean_range(paddr, paddr + size);
}

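/*
 * Walk the pages backing a system-heap handle and perform inner and/or
 * outer cache maintenance on the requested range, temporarily mapping
 * each page through the supplied PTE when inner maintenance is needed.
 */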
static void heap_page_cache_maint(struct nvmap_client *client,
        struct nvmap_handle *h, unsigned long start, unsigned long end,
        unsigned int op, bool inner, bool outer, pte_t **pte,
        unsigned long kaddr, pgprot_t prot)
{
        struct page *page;
        unsigned long paddr;
        unsigned long next;
        unsigned long off;
        size_t size;

        while (start < end) {
                page = h->pgalloc.pages[start >> PAGE_SHIFT];
                next = min(((start + PAGE_SIZE) & PAGE_MASK), end);
                off = start & ~PAGE_MASK;
                size = next - start;
                paddr = page_to_phys(page) + off;

                if (inner) {
                        void *vaddr = (void *)kaddr + off;
                        BUG_ON(!pte);
                        BUG_ON(!kaddr);
                        set_pte_at(&init_mm, kaddr, *pte,
                                pfn_pte(__phys_to_pfn(paddr), prot));
                        flush_tlb_kernel_page(kaddr);
                        inner_cache_maint(op, vaddr, size);
                }

                if (outer)
                        outer_cache_maint(op, paddr, size);
                start = next;
        }
}

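/*
 * For large writeback/flush requests it is cheaper to clean or flush
 * the entire inner cache by set/way than to walk the range line by
 * line; outer maintenance is still done by physical range. Returns
 * true if the request was fully handled here.
 */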
static bool fast_cache_maint(struct nvmap_client *client, struct nvmap_handle *h,
        unsigned long start, unsigned long end, unsigned int op)
{
        int ret = false;

#if defined(CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS)
        if ((op == NVMAP_CACHE_OP_INV) ||
                ((end - start) < FLUSH_CLEAN_BY_SET_WAY_THRESHOLD))
                goto out;

        if (op == NVMAP_CACHE_OP_WB_INV)
                inner_flush_cache_all();
        else if (op == NVMAP_CACHE_OP_WB)
                inner_clean_cache_all();

        if (h->heap_pgalloc && (h->flags != NVMAP_HANDLE_INNER_CACHEABLE)) {
                heap_page_cache_maint(client, h, start, end, op,
                                false, true, NULL, 0, 0);
        } else if (h->flags != NVMAP_HANDLE_INNER_CACHEABLE) {
                start += h->carveout->base;
                end += h->carveout->base;
                outer_cache_maint(op, start, end - start);
        }
        ret = true;
out:
#endif
        return ret;
}

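/*
 * Full cache maintenance path: validate the handle, try the set/way
 * fast path, and otherwise map the backing memory one page at a time
 * through a scratch PTE to perform inner (and, when required, outer)
 * maintenance.
 */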
static int cache_maint(struct nvmap_client *client, struct nvmap_handle *h,
                       unsigned long start, unsigned long end, unsigned int op)
{
        pgprot_t prot;
        pte_t **pte = NULL;
        unsigned long kaddr;
        unsigned long loop;
        int err = 0;

        h = nvmap_handle_get(h);
        if (!h)
                return -EFAULT;

        if (!h->alloc) {
                err = -EFAULT;
                goto out;
        }

        trace_cache_maint(client, h, start, end, op);
        wmb();
        if (h->flags == NVMAP_HANDLE_UNCACHEABLE ||
            h->flags == NVMAP_HANDLE_WRITE_COMBINE || start == end)
                goto out;

        if (fast_cache_maint(client, h, start, end, op))
                goto out;

        prot = nvmap_pgprot(h, pgprot_kernel);
        pte = nvmap_alloc_pte(client->dev, (void **)&kaddr);
        if (IS_ERR(pte)) {
                err = PTR_ERR(pte);
                pte = NULL;
                goto out;
        }

        if (h->heap_pgalloc) {
                heap_page_cache_maint(client, h, start, end, op, true,
                        (h->flags == NVMAP_HANDLE_INNER_CACHEABLE) ? false : true,
                        pte, kaddr, prot);
                goto out;
        }

        if (start > h->size || end > h->size) {
                nvmap_warn(client, "cache maintenance outside handle\n");
                err = -EINVAL;
                goto out;
        }

        /* lock carveout from relocation by mapcount */
        nvmap_usecount_inc(h);

        start += h->carveout->base;
        end += h->carveout->base;

        loop = start;

        while (loop < end) {
                unsigned long next = (loop + PAGE_SIZE) & PAGE_MASK;
                void *base = (void *)kaddr + (loop & ~PAGE_MASK);
                next = min(next, end);

                set_pte_at(&init_mm, kaddr, *pte,
                           pfn_pte(__phys_to_pfn(loop), prot));
                flush_tlb_kernel_page(kaddr);

                inner_cache_maint(op, base, next - loop);
                loop = next;
        }

        if (h->flags != NVMAP_HANDLE_INNER_CACHEABLE)
                outer_cache_maint(op, start, end - start);

        /* unlock carveout */
        nvmap_usecount_dec(h);

out:
        if (pte)
                nvmap_free_pte(client->dev, pte);
        nvmap_handle_put(h);
        return err;
}

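/*
 * Copy 'bytes' bytes between user space and a handle, one physical page
 * at a time, remapping the scratch PTE as each page boundary is
 * crossed.
 */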
static int rw_handle_page(struct nvmap_handle *h, int is_read,
                          phys_addr_t start, unsigned long rw_addr,
                          unsigned long bytes, unsigned long kaddr, pte_t *pte)
{
        pgprot_t prot = nvmap_pgprot(h, pgprot_kernel);
        unsigned long end = start + bytes;
        int err = 0;

        while (!err && start < end) {
                struct page *page = NULL;
                phys_addr_t phys;
                size_t count;
                void *src;

                if (!h->heap_pgalloc) {
                        phys = h->carveout->base + start;
                } else {
                        page = h->pgalloc.pages[start >> PAGE_SHIFT];
                        BUG_ON(!page);
                        get_page(page);
                        phys = page_to_phys(page) + (start & ~PAGE_MASK);
                }

                set_pte_at(&init_mm, kaddr, pte,
                           pfn_pte(__phys_to_pfn(phys), prot));
                flush_tlb_kernel_page(kaddr);

                src = (void *)kaddr + (phys & ~PAGE_MASK);
                phys = PAGE_SIZE - (phys & ~PAGE_MASK);
                count = min_t(size_t, end - start, phys);

                if (is_read)
                        err = copy_to_user((void *)rw_addr, src, count);
                else
                        err = copy_from_user(src, (void *)rw_addr, count);

                if (err)
                        err = -EFAULT;

                rw_addr += count;
                start += count;

                if (page)
                        put_page(page);
        }

        return err;
}

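/*
 * Strided read/write between user memory and a handle. Contiguous
 * transfers are collapsed into a single element, and cache maintenance
 * brackets each element so the CPU copy stays coherent with device
 * accesses.
 */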
static ssize_t rw_handle(struct nvmap_client *client, struct nvmap_handle *h,
                         int is_read, unsigned long h_offs,
                         unsigned long sys_addr, unsigned long h_stride,
                         unsigned long sys_stride, unsigned long elem_size,
                         unsigned long count)
{
        ssize_t copied = 0;
        pte_t **pte;
        void *addr;
        int ret = 0;

        if (!elem_size)
                return -EINVAL;

        if (!h->alloc)
                return -EFAULT;

        if (elem_size == h_stride && elem_size == sys_stride) {
                elem_size *= count;
                h_stride = elem_size;
                sys_stride = elem_size;
                count = 1;
        }

        pte = nvmap_alloc_pte(client->dev, &addr);
        if (IS_ERR(pte))
                return PTR_ERR(pte);

        while (count--) {
                if (h_offs + elem_size > h->size) {
                        nvmap_warn(client, "read/write outside of handle\n");
                        ret = -EFAULT;
                        break;
                }
                if (is_read)
                        cache_maint(client, h, h_offs,
                                h_offs + elem_size, NVMAP_CACHE_OP_INV);

                ret = rw_handle_page(h, is_read, h_offs, sys_addr,
                                     elem_size, (unsigned long)addr, *pte);

                if (ret)
                        break;

                if (!is_read)
                        cache_maint(client, h, h_offs,
                                h_offs + elem_size, NVMAP_CACHE_OP_WB);

                copied += elem_size;
                sys_addr += sys_stride;
                h_offs += h_stride;
        }

        nvmap_free_pte(client->dev, pte);
        return ret ?: copied;
}