/*
 * drivers/video/tegra/nvmap/nvmap_ioctl.c
 *
 * User-space interface to nvmap
 *
 * Copyright (c) 2011, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/nvmap.h>

#include <asm/cacheflush.h>
#include <asm/outercache.h>
#include <asm/tlbflush.h>

#include <mach/iovmm.h>
#include <trace/events/nvmap.h>

#include "nvmap_ioctl.h"
#include "nvmap.h"
#include "nvmap_common.h"

static ssize_t rw_handle(struct nvmap_client *client, struct nvmap_handle *h,
			 int is_read, unsigned long h_offs,
			 unsigned long sys_addr, unsigned long h_stride,
			 unsigned long sys_stride, unsigned long elem_size,
			 unsigned long count);

static int cache_maint(struct nvmap_client *client, struct nvmap_handle *h,
		       unsigned long start, unsigned long end, unsigned int op);

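/*
 * Ioctl handler for pin/unpin requests.  Copies the array of handle IDs from
 * user space (using a small on-stack buffer when possible), pins or unpins
 * them, and on a successful pin writes the resulting physical (or IOVMM)
 * addresses back to the caller.
 */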
int nvmap_ioctl_pinop(struct file *filp, bool is_pin, void __user *arg)
{
	struct nvmap_pin_handle op;
	struct nvmap_handle *h;
	unsigned long on_stack[16];
	unsigned long *refs;
	unsigned long __user *output;
	unsigned int i;
	int err = 0;

	if (copy_from_user(&op, arg, sizeof(op)))
		return -EFAULT;

	if (!op.count)
		return -EINVAL;

	if (op.count > 1) {
		size_t bytes = op.count * sizeof(*refs); /* kcalloc below will catch overflow. */

		if (op.count > ARRAY_SIZE(on_stack))
			refs = kcalloc(op.count, sizeof(*refs), GFP_KERNEL);
		else
			refs = on_stack;

		if (!refs)
			return -ENOMEM;

		if (copy_from_user(refs, (void __user *)op.handles, bytes)) {
			err = -EFAULT;
			goto out;
		}
	} else {
		refs = on_stack;
		on_stack[0] = (unsigned long)op.handles;
	}

	trace_nvmap_ioctl_pinop(filp->private_data, is_pin, op.count, refs);
	if (is_pin)
		err = nvmap_pin_ids(filp->private_data, op.count, refs);
	else
		nvmap_unpin_ids(filp->private_data, op.count, refs);

	/* skip the output stage on unpin */
	if (err || !is_pin)
		goto out;

	/* it is guaranteed that if nvmap_pin_ids returns 0 that
	 * all of the handle_ref objects are valid, so dereferencing
	 * directly here is safe */
	if (op.count > 1)
		output = (unsigned long __user *)op.addr;
	else {
		struct nvmap_pin_handle __user *tmp = arg;
		output = (unsigned long __user *)&(tmp->addr);
	}

	if (!output)
		goto out;

	for (i = 0; i < op.count && !err; i++) {
		unsigned long addr;

		h = (struct nvmap_handle *)refs[i];

		if (h->heap_pgalloc && h->pgalloc.contig)
			addr = page_to_phys(h->pgalloc.pages[0]);
		else if (h->heap_pgalloc)
			addr = h->pgalloc.area->iovm_start;
		else
			addr = h->carveout->base;

		err = put_user(addr, &output[i]);
	}

	if (err)
		nvmap_unpin_ids(filp->private_data, op.count, refs);

out:
	if (refs != on_stack)
		kfree(refs);

	return err;
}

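/*
 * Ioctl handler: look up a client-local handle and return its global ID
 * (the handle pointer value); mark the handle as globally shareable when
 * the caller is its owner.
 */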
int nvmap_ioctl_getid(struct file *filp, void __user *arg)
{
	struct nvmap_client *client = filp->private_data;
	struct nvmap_create_handle op;
	struct nvmap_handle *h = NULL;

	if (copy_from_user(&op, arg, sizeof(op)))
		return -EFAULT;

	if (!op.handle)
		return -EINVAL;

	h = nvmap_get_handle_id(client, op.handle);

	if (!h)
		return -EPERM;

	op.id = (__u32)h;
	if (client == h->owner)
		h->global = true;

	nvmap_handle_put(h);

	return copy_to_user(arg, &op, sizeof(op)) ? -EFAULT : 0;
}

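/*
 * Ioctl handler: back an existing handle with memory.  The requested
 * alignment must be a power of two and is raised to at least PAGE_SIZE so
 * that user-space buffers never share a page.
 */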
int nvmap_ioctl_alloc(struct file *filp, void __user *arg)
{
	struct nvmap_alloc_handle op;
	struct nvmap_client *client = filp->private_data;

	if (copy_from_user(&op, arg, sizeof(op)))
		return -EFAULT;

	if (!op.handle)
		return -EINVAL;

	if (op.align & (op.align - 1))
		return -EINVAL;

	/* user-space handles are aligned to page boundaries, to prevent
	 * data leakage. */
	op.align = max_t(size_t, op.align, PAGE_SIZE);

	return nvmap_alloc_handle_id(client, op.handle, op.heap_mask,
				     op.align, op.flags);
}

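/*
 * Handler for NVMAP_IOC_CREATE and NVMAP_IOC_FROM_ID.  Either creates a new,
 * unbacked handle of the requested size or duplicates an existing handle by
 * global ID, then returns the new handle to user space.
 */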
int nvmap_ioctl_create(struct file *filp, unsigned int cmd, void __user *arg)
{
	struct nvmap_create_handle op;
	struct nvmap_handle_ref *ref = NULL;
	struct nvmap_client *client = filp->private_data;
	int err = 0;

	if (copy_from_user(&op, arg, sizeof(op)))
		return -EFAULT;

	if (!client)
		return -ENODEV;

	if (cmd == NVMAP_IOC_CREATE) {
		ref = nvmap_create_handle(client, PAGE_ALIGN(op.size));
		if (!IS_ERR(ref))
			ref->handle->orig_size = op.size;
	} else if (cmd == NVMAP_IOC_FROM_ID) {
		ref = nvmap_duplicate_handle_id(client, op.id);
	} else {
		return -EINVAL;
	}

	if (IS_ERR(ref))
		return PTR_ERR(ref);

	op.handle = nvmap_ref_to_id(ref);
	if (copy_to_user(arg, &op, sizeof(op))) {
		err = -EFAULT;
		nvmap_free_handle_id(client, op.handle);
	}

	return err;
}

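/*
 * Ioctl handler: bind a handle to a VMA previously created by mmap()ing the
 * nvmap device.  The VMA must exactly cover the requested range and must not
 * already be bound to a different handle.  Optionally upgrades the handle's
 * cacheability attributes when the caller asks for a cacheable mapping of an
 * uncached/write-combined buffer.
 */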
int nvmap_map_into_caller_ptr(struct file *filp, void __user *arg)
{
	struct nvmap_client *client = filp->private_data;
	struct nvmap_map_caller op;
	struct nvmap_vma_priv *vpriv;
	struct vm_area_struct *vma;
	struct nvmap_handle *h = NULL;
	unsigned int cache_flags;
	int err = 0;

	if (copy_from_user(&op, arg, sizeof(op)))
		return -EFAULT;

	if (!op.handle)
		return -EINVAL;

	h = nvmap_get_handle_id(client, op.handle);

	if (!h)
		return -EPERM;

	trace_nvmap_map_into_caller_ptr(client, h, op.offset,
					op.length, op.flags);
	down_read(&current->mm->mmap_sem);

	vma = find_vma(current->mm, op.addr);
	if (!vma || !vma->vm_private_data) {
		err = -ENOMEM;
		goto out;
	}

	if (op.offset & ~PAGE_MASK) {
		err = -EFAULT;
		goto out;
	}

	if (op.offset > h->size || (op.offset + op.length) > h->size) {
		err = -EADDRNOTAVAIL;
		goto out;
	}

	vpriv = vma->vm_private_data;
	BUG_ON(!vpriv);

	/* the VMA must exactly match the requested mapping operation, and the
	 * VMA that is targeted must have been created by this driver
	 */
	if ((vma->vm_start != op.addr) || !is_nvmap_vma(vma) ||
	    (vma->vm_end - vma->vm_start != op.length)) {
		err = -EPERM;
		goto out;
	}

	/* verify that each mmap() system call creates a unique VMA */

	if (vpriv->handle && (h == vpriv->handle)) {
		goto out;
	} else if (vpriv->handle) {
		err = -EADDRNOTAVAIL;
		goto out;
	}

	nvmap_usecount_inc(h);

	if (!h->heap_pgalloc && (h->carveout->base & ~PAGE_MASK)) {
		nvmap_usecount_dec(h);
		err = -EFAULT;
		goto out;
	}

	vpriv->handle = h;
	vpriv->offs = op.offset;

	cache_flags = op.flags & NVMAP_HANDLE_CACHE_FLAG;
	if ((cache_flags == NVMAP_HANDLE_INNER_CACHEABLE ||
	     cache_flags == NVMAP_HANDLE_CACHEABLE) &&
	    (h->flags == NVMAP_HANDLE_UNCACHEABLE ||
	     h->flags == NVMAP_HANDLE_WRITE_COMBINE)) {
		if (h->size & ~PAGE_MASK) {
			pr_err("%s: attempt to convert a buffer from uc/wc to"
				" wb whose size is not a multiple of page size;"
				" request ignored\n", __func__);
		} else {
			unsigned int nr_page = h->size >> PAGE_SHIFT;
			wmb();
			/* override allocation time cache coherency attributes. */
			h->flags &= ~NVMAP_HANDLE_CACHE_FLAG;
			h->flags |= cache_flags;

			/* Update page attributes, if the memory is allocated
			 * from system heap pages.
			 */
			if (cache_flags == NVMAP_HANDLE_INNER_CACHEABLE &&
				h->heap_pgalloc)
				set_pages_array_iwb(h->pgalloc.pages, nr_page);
			else if (h->heap_pgalloc)
				set_pages_array_wb(h->pgalloc.pages, nr_page);
		}
	}
	vma->vm_page_prot = nvmap_pgprot(h, vma->vm_page_prot);

out:
	up_read(&current->mm->mmap_sem);

	if (err)
		nvmap_handle_put(h);
	return err;
}

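/*
 * Ioctl handler: report per-handle attributes (size, alignment, physical
 * base, backing heap) back to user space.
 */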
int nvmap_ioctl_get_param(struct file *filp, void __user *arg)
{
	struct nvmap_handle_param op;
	struct nvmap_client *client = filp->private_data;
	struct nvmap_handle *h;
	int err = 0;

	if (copy_from_user(&op, arg, sizeof(op)))
		return -EFAULT;

	h = nvmap_get_handle_id(client, op.handle);
	if (!h)
		return -EINVAL;

	switch (op.param) {
	case NVMAP_HANDLE_PARAM_SIZE:
		op.result = h->orig_size;
		break;
	case NVMAP_HANDLE_PARAM_ALIGNMENT:
		mutex_lock(&h->lock);
		if (!h->alloc)
			op.result = 0;
		else if (h->heap_pgalloc)
			op.result = PAGE_SIZE;
		else if (h->carveout->base)
			op.result = (h->carveout->base & -h->carveout->base);
		else
			op.result = SZ_4M;
		mutex_unlock(&h->lock);
		break;
	case NVMAP_HANDLE_PARAM_BASE:
		if (WARN_ON(!h->alloc || !atomic_add_return(0, &h->pin)))
			op.result = -1ul;
		else if (!h->heap_pgalloc) {
			mutex_lock(&h->lock);
			op.result = h->carveout->base;
			mutex_unlock(&h->lock);
		} else if (h->pgalloc.contig)
			op.result = page_to_phys(h->pgalloc.pages[0]);
		else if (h->pgalloc.area)
			op.result = h->pgalloc.area->iovm_start;
		else
			op.result = -1ul;
		break;
	case NVMAP_HANDLE_PARAM_HEAP:
		if (!h->alloc)
			op.result = 0;
		else if (!h->heap_pgalloc) {
			mutex_lock(&h->lock);
			op.result = nvmap_carveout_usage(client, h->carveout);
			mutex_unlock(&h->lock);
		} else if (h->pgalloc.contig)
			op.result = NVMAP_HEAP_SYSMEM;
		else
			op.result = NVMAP_HEAP_IOVMM;
		break;
	default:
		err = -EINVAL;
		break;
	}

	if (!err && copy_to_user(arg, &op, sizeof(op)))
		err = -EFAULT;

	nvmap_handle_put(h);
	return err;
}

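/*
 * Ioctl handler for handle read/write.  Performs a strided copy between a
 * user buffer and a handle's backing memory via rw_handle(), and writes the
 * number of bytes actually copied back into the caller's argument struct.
 */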
int nvmap_ioctl_rw_handle(struct file *filp, int is_read, void __user *arg)
{
	struct nvmap_client *client = filp->private_data;
	struct nvmap_rw_handle __user *uarg = arg;
	struct nvmap_rw_handle op;
	struct nvmap_handle *h;
	ssize_t copied;
	int err = 0;

	if (copy_from_user(&op, arg, sizeof(op)))
		return -EFAULT;

	if (!op.handle || !op.addr || !op.count || !op.elem_size)
		return -EINVAL;

	h = nvmap_get_handle_id(client, op.handle);
	if (!h)
		return -EPERM;

	nvmap_usecount_inc(h);

	trace_nvmap_ioctl_rw_handle(client, h, is_read, op.offset,
				    op.addr, op.hmem_stride,
				    op.user_stride, op.elem_size, op.count);
	copied = rw_handle(client, h, is_read, op.offset,
			   (unsigned long)op.addr, op.hmem_stride,
			   op.user_stride, op.elem_size, op.count);

	if (copied < 0) {
		err = copied;
		copied = 0;
	} else if (copied < (op.count * op.elem_size))
		err = -EINTR;

	__put_user(copied, &uarg->count);

	nvmap_usecount_dec(h);

	nvmap_handle_put(h);

	return err;
}

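/*
 * Ioctl handler for cache maintenance.  Translates the user virtual range
 * into a handle offset range (the VMA must belong to nvmap and be bound to
 * the given handle) and performs the requested writeback/invalidate
 * operation.
 */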
int nvmap_ioctl_cache_maint(struct file *filp, void __user *arg)
{
	struct nvmap_client *client = filp->private_data;
	struct nvmap_cache_op op;
	struct vm_area_struct *vma;
	struct nvmap_vma_priv *vpriv;
	unsigned long start;
	unsigned long end;
	int err = 0;

	if (copy_from_user(&op, arg, sizeof(op)))
		return -EFAULT;

	if (!op.handle || !op.addr || op.op < NVMAP_CACHE_OP_WB ||
	    op.op > NVMAP_CACHE_OP_WB_INV)
		return -EINVAL;

	down_read(&current->mm->mmap_sem);

	vma = find_vma(current->active_mm, (unsigned long)op.addr);
	if (!vma || !is_nvmap_vma(vma) ||
	    (unsigned long)op.addr + op.len > vma->vm_end) {
		err = -EADDRNOTAVAIL;
		goto out;
	}

	vpriv = (struct nvmap_vma_priv *)vma->vm_private_data;

	if ((unsigned long)vpriv->handle != op.handle) {
		err = -EFAULT;
		goto out;
	}

	start = (unsigned long)op.addr - vma->vm_start;
	end = start + op.len;

	err = cache_maint(client, vpriv->handle, start, end, op.op);
out:
	up_read(&current->mm->mmap_sem);
	return err;
}

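/*
 * Ioctl handler: drop the client's reference to a handle; a zero argument is
 * treated as a no-op.
 */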
int nvmap_ioctl_free(struct file *filp, unsigned long arg)
{
	struct nvmap_client *client = filp->private_data;

	if (!arg)
		return 0;

	nvmap_free_handle_id(client, arg);
	return 0;
}

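/* Perform the requested maintenance operation on the inner (L1) cache over a
 * kernel virtual range. */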
static void inner_cache_maint(unsigned int op, void *vaddr, size_t size)
{
	if (op == NVMAP_CACHE_OP_WB_INV)
		dmac_flush_range(vaddr, vaddr + size);
	else if (op == NVMAP_CACHE_OP_INV)
		dmac_map_area(vaddr, size, DMA_FROM_DEVICE);
	else
		dmac_map_area(vaddr, size, DMA_TO_DEVICE);
}

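/* Perform the requested maintenance operation on the outer (L2) cache over a
 * physical address range. */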
static void outer_cache_maint(unsigned int op, unsigned long paddr, size_t size)
{
	if (op == NVMAP_CACHE_OP_WB_INV)
		outer_flush_range(paddr, paddr + size);
	else if (op == NVMAP_CACHE_OP_INV)
		outer_inv_range(paddr, paddr + size);
	else
		outer_clean_range(paddr, paddr + size);
}

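/*
 * Walk a [start, end) offset range of a page-allocated handle one page at a
 * time, temporarily mapping each page through the caller-supplied scratch
 * PTE for inner cache maintenance and/or operating on its physical address
 * for outer cache maintenance.
 */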
static void heap_page_cache_maint(struct nvmap_client *client,
	struct nvmap_handle *h, unsigned long start, unsigned long end,
	unsigned int op, bool inner, bool outer, pte_t **pte,
	unsigned long kaddr, pgprot_t prot)
{
	struct page *page;
	unsigned long paddr;
	unsigned long next;
	unsigned long off;
	size_t size;

	while (start < end) {
		page = h->pgalloc.pages[start >> PAGE_SHIFT];
		next = min(((start + PAGE_SIZE) & PAGE_MASK), end);
		off = start & ~PAGE_MASK;
		size = next - start;
		paddr = page_to_phys(page) + off;

		if (inner) {
			void *vaddr = (void *)kaddr + off;
			BUG_ON(!pte);
			BUG_ON(!kaddr);
			set_pte_at(&init_mm, kaddr, *pte,
				pfn_pte(__phys_to_pfn(paddr), prot));
			flush_tlb_kernel_page(kaddr);
			inner_cache_maint(op, vaddr, size);
		}

		if (outer)
			outer_cache_maint(op, paddr, size);
		start = next;
	}
}

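/*
 * When CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS is enabled, large clean/flush
 * requests are satisfied by a whole-cache operation instead of a range walk;
 * the outer cache is still maintained by range.  Returns true if the request
 * was handled here.
 */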
static bool fast_cache_maint(struct nvmap_client *client, struct nvmap_handle *h,
	unsigned long start, unsigned long end, unsigned int op)
{
	bool ret = false;

#if defined(CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS)
	if ((op == NVMAP_CACHE_OP_INV) ||
		((end - start) < FLUSH_CLEAN_BY_SET_WAY_THRESHOLD))
		goto out;

	if (op == NVMAP_CACHE_OP_WB_INV)
		inner_flush_cache_all();
	else if (op == NVMAP_CACHE_OP_WB)
		inner_clean_cache_all();

	if (h->heap_pgalloc && (h->flags != NVMAP_HANDLE_INNER_CACHEABLE)) {
		heap_page_cache_maint(client, h, start, end, op,
				false, true, NULL, 0, 0);
	} else if (h->flags != NVMAP_HANDLE_INNER_CACHEABLE) {
		start += h->carveout->base;
		end += h->carveout->base;
		outer_cache_maint(op, start, end - start);
	}
	ret = true;
out:
#endif
	return ret;
}

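/*
 * Core cache-maintenance routine for a handle offset range.  Uncacheable and
 * write-combined handles need no maintenance; otherwise try the whole-cache
 * fast path, then fall back to mapping the backing memory page by page
 * through a scratch PTE and cleaning/invalidating each piece.
 */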
static int cache_maint(struct nvmap_client *client, struct nvmap_handle *h,
		       unsigned long start, unsigned long end, unsigned int op)
{
	pgprot_t prot;
	pte_t **pte = NULL;
	unsigned long kaddr;
	unsigned long loop;
	int err = 0;

	h = nvmap_handle_get(h);
	if (!h)
		return -EFAULT;

	if (!h->alloc) {
		err = -EFAULT;
		goto out;
	}

	trace_cache_maint(client, h, start, end, op);
	wmb();
	if (h->flags == NVMAP_HANDLE_UNCACHEABLE ||
	    h->flags == NVMAP_HANDLE_WRITE_COMBINE || start == end)
		goto out;

	if (fast_cache_maint(client, h, start, end, op))
		goto out;

	prot = nvmap_pgprot(h, pgprot_kernel);
	pte = nvmap_alloc_pte(client->dev, (void **)&kaddr);
	if (IS_ERR(pte)) {
		err = PTR_ERR(pte);
		pte = NULL;
		goto out;
	}

	if (h->heap_pgalloc) {
		heap_page_cache_maint(client, h, start, end, op, true,
			(h->flags == NVMAP_HANDLE_INNER_CACHEABLE) ? false : true,
			pte, kaddr, prot);
		goto out;
	}

	if (start > h->size || end > h->size) {
		nvmap_warn(client, "cache maintenance outside handle\n");
		/* release the PTE and handle reference taken above */
		err = -EINVAL;
		goto out;
	}

	/* lock carveout from relocation by mapcount */
	nvmap_usecount_inc(h);

	start += h->carveout->base;
	end += h->carveout->base;

	loop = start;

	while (loop < end) {
		unsigned long next = (loop + PAGE_SIZE) & PAGE_MASK;
		void *base = (void *)kaddr + (loop & ~PAGE_MASK);
		next = min(next, end);

		set_pte_at(&init_mm, kaddr, *pte,
			   pfn_pte(__phys_to_pfn(loop), prot));
		flush_tlb_kernel_page(kaddr);

		inner_cache_maint(op, base, next - loop);
		loop = next;
	}

	if (h->flags != NVMAP_HANDLE_INNER_CACHEABLE)
		outer_cache_maint(op, start, end - start);

	/* unlock carveout */
	nvmap_usecount_dec(h);

out:
	if (pte)
		nvmap_free_pte(client->dev, pte);
	nvmap_handle_put(h);
	return err;
}

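/*
 * Copy data between a user buffer and the handle's backing memory, mapping
 * each physical page through the caller-supplied scratch PTE and copying at
 * most one page's worth at a time.
 */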
static int rw_handle_page(struct nvmap_handle *h, int is_read,
			  phys_addr_t start, unsigned long rw_addr,
			  unsigned long bytes, unsigned long kaddr, pte_t *pte)
{
	pgprot_t prot = nvmap_pgprot(h, pgprot_kernel);
	unsigned long end = start + bytes;
	int err = 0;

	while (!err && start < end) {
		struct page *page = NULL;
		phys_addr_t phys;
		size_t count;
		void *src;

		if (!h->heap_pgalloc) {
			phys = h->carveout->base + start;
		} else {
			page = h->pgalloc.pages[start >> PAGE_SHIFT];
			BUG_ON(!page);
			get_page(page);
			phys = page_to_phys(page) + (start & ~PAGE_MASK);
		}

		set_pte_at(&init_mm, kaddr, pte,
			   pfn_pte(__phys_to_pfn(phys), prot));
		flush_tlb_kernel_page(kaddr);

		src = (void *)kaddr + (phys & ~PAGE_MASK);
		phys = PAGE_SIZE - (phys & ~PAGE_MASK);
		count = min_t(size_t, end - start, phys);

		if (is_read)
			err = copy_to_user((void __user *)rw_addr, src, count);
		else
			err = copy_from_user(src, (void __user *)rw_addr, count);

		if (err)
			err = -EFAULT;

		rw_addr += count;
		start += count;

		if (page)
			put_page(page);
	}

	return err;
}

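/*
 * Strided read/write between a user buffer and a handle.  Collapses the loop
 * to a single large element when both strides equal the element size, and
 * brackets each element copy with the appropriate cache maintenance.
 */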
static ssize_t rw_handle(struct nvmap_client *client, struct nvmap_handle *h,
			 int is_read, unsigned long h_offs,
			 unsigned long sys_addr, unsigned long h_stride,
			 unsigned long sys_stride, unsigned long elem_size,
			 unsigned long count)
{
	ssize_t copied = 0;
	pte_t **pte;
	void *addr;
	int ret = 0;

	if (!elem_size)
		return -EINVAL;

	if (!h->alloc)
		return -EFAULT;

	if (elem_size == h_stride && elem_size == sys_stride) {
		elem_size *= count;
		h_stride = elem_size;
		sys_stride = elem_size;
		count = 1;
	}

	pte = nvmap_alloc_pte(client->dev, &addr);
	if (IS_ERR(pte))
		return PTR_ERR(pte);

	while (count--) {
		if (h_offs + elem_size > h->size) {
			nvmap_warn(client, "read/write outside of handle\n");
			ret = -EFAULT;
			break;
		}
		if (is_read)
			cache_maint(client, h, h_offs,
				h_offs + elem_size, NVMAP_CACHE_OP_INV);

		ret = rw_handle_page(h, is_read, h_offs, sys_addr,
				     elem_size, (unsigned long)addr, *pte);

		if (ret)
			break;

		if (!is_read)
			cache_maint(client, h, h_offs,
				h_offs + elem_size, NVMAP_CACHE_OP_WB);

		copied += elem_size;
		sys_addr += sys_stride;
		h_offs += h_stride;
	}

	nvmap_free_pte(client->dev, pte);
	return ret ?: copied;
}