video: tegra: nvmap: set handle dmabuf to NULL early
linux-3.10.git: drivers/video/tegra/nvmap/nvmap_handle.c
/*
 * drivers/video/tegra/nvmap/nvmap_handle.c
 *
 * Handle allocation and freeing routines for nvmap
 *
 * Copyright (c) 2009-2014, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#define pr_fmt(fmt)     "%s: " fmt, __func__

#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/dma-buf.h>
#include <linux/moduleparam.h>
#include <linux/nvmap.h>
#include <linux/tegra-soc.h>

#include <asm/pgtable.h>

#include <trace/events/nvmap.h>

#include "nvmap_priv.h"
#include "nvmap_ioctl.h"

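/*
 * "zero_memory" module parameter: when set, page allocations below request
 * __GFP_ZERO and freed pages bypass the page pool. The setter also drains
 * any pages already sitting in the pool via nvmap_page_pool_clear().
 */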
bool zero_memory;

static int zero_memory_set(const char *arg, const struct kernel_param *kp)
{
        int ret = param_set_bool(arg, kp);

        if (ret)
                return ret;

        nvmap_page_pool_clear();
        return 0;
}

static struct kernel_param_ops zero_memory_ops = {
        .get = param_get_bool,
        .set = zero_memory_set,
};

module_param_cb(zero_memory, &zero_memory_ops, &zero_memory, 0644);

u32 nvmap_max_handle_count;

/* Handles may be arbitrarily large (16+MiB), and any handle allocated from
 * the kernel (i.e., not a carveout handle) includes its array of pages. To
 * preserve kmalloc space, if the array of pages exceeds PAGELIST_VMALLOC_MIN,
 * the array is allocated using vmalloc. */
#define PAGELIST_VMALLOC_MIN    (PAGE_SIZE)

void *nvmap_altalloc(size_t len)
{
        if (len > PAGELIST_VMALLOC_MIN)
                return vmalloc(len);
        else
                return kmalloc(len, GFP_KERNEL);
}

void nvmap_altfree(void *ptr, size_t len)
{
        if (!ptr)
                return;

        if (len > PAGELIST_VMALLOC_MIN)
                vfree(ptr);
        else
                kfree(ptr);
}

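/*
 * Tear down a handle: detach it from the device and release its backing
 * store (the carveout block, or the allocated pages, which are returned to
 * the page pool when possible) before freeing the handle structure itself.
 */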
void _nvmap_handle_free(struct nvmap_handle *h)
{
        unsigned int i, nr_page, page_index = 0;
#if defined(CONFIG_NVMAP_PAGE_POOLS) && \
        !defined(CONFIG_NVMAP_FORCE_ZEROED_USER_PAGES)
        struct nvmap_page_pool *pool;
#endif

        if (h->nvhost_priv)
                h->nvhost_priv_delete(h->nvhost_priv);

        if (nvmap_handle_remove(nvmap_dev, h) != 0)
                return;

        if (!h->alloc)
                goto out;

        nvmap_stats_inc(NS_RELEASE, h->size);
        nvmap_stats_dec(NS_TOTAL, PAGE_ALIGN(h->orig_size));
        if (!h->heap_pgalloc) {
                nvmap_heap_free(h->carveout);
                goto out;
        }

        nr_page = DIV_ROUND_UP(h->size, PAGE_SIZE);

        BUG_ON(h->size & ~PAGE_MASK);
        BUG_ON(!h->pgalloc.pages);

#ifdef NVMAP_LAZY_VFREE
        if (h->vaddr)
                vm_unmap_ram(h->vaddr, h->size >> PAGE_SHIFT);
#endif

        for (i = 0; i < nr_page; i++)
                h->pgalloc.pages[i] = nvmap_to_page(h->pgalloc.pages[i]);

#if defined(CONFIG_NVMAP_PAGE_POOLS) && \
        !defined(CONFIG_NVMAP_FORCE_ZEROED_USER_PAGES)
        if (!zero_memory) {
                pool = &nvmap_dev->pool;

                nvmap_page_pool_lock(pool);
                page_index = __nvmap_page_pool_fill_lots_locked(pool,
                                                h->pgalloc.pages, nr_page);
                nvmap_page_pool_unlock(pool);
        }
#endif

        for (i = page_index; i < nr_page; i++)
                __free_page(h->pgalloc.pages[i]);

        nvmap_altfree(h->pgalloc.pages, nr_page * sizeof(struct page *));

out:
        kfree(h);
}

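/*
 * Allocate exactly size bytes worth of pages: over-allocate a power-of-two
 * order, split it into individual pages and give back the tail pages that
 * exceed the requested size.
 */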
static struct page *nvmap_alloc_pages_exact(gfp_t gfp, size_t size)
{
        struct page *page, *p, *e;
        unsigned int order;

        size = PAGE_ALIGN(size);
        order = get_order(size);
        page = alloc_pages(gfp, order);

        if (!page)
                return NULL;

        split_page(page, order);
        e = page + (1 << order);
        for (p = page + (size >> PAGE_SHIFT); p < e; p++)
                __free_page(p);

        return page;
}

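/*
 * Back a handle with system pages: either one physically contiguous run
 * (for NVMAP_HANDLE_PHYS_CONTIG requests) or discrete pages taken from the
 * page pool first and the page allocator after that. The caches are cleaned
 * or flushed before the pages can be handed out.
 */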
static int handle_page_alloc(struct nvmap_client *client,
                             struct nvmap_handle *h, bool contiguous)
{
        size_t size = PAGE_ALIGN(h->size);
        unsigned int nr_page = size >> PAGE_SHIFT;
        pgprot_t prot;
        unsigned int i = 0, page_index = 0;
        struct page **pages;
#ifdef CONFIG_NVMAP_PAGE_POOLS
        struct nvmap_page_pool *pool = NULL;
#endif
        gfp_t gfp = GFP_NVMAP;

        if (zero_memory)
                gfp |= __GFP_ZERO;

        pages = nvmap_altalloc(nr_page * sizeof(*pages));
        if (!pages)
                return -ENOMEM;

        prot = nvmap_pgprot(h, PG_PROT_KERNEL);

        if (contiguous) {
                struct page *page;
                page = nvmap_alloc_pages_exact(gfp, size);
                if (!page)
                        goto fail;

                for (i = 0; i < nr_page; i++)
                        pages[i] = nth_page(page, i);

        } else {
#ifdef CONFIG_NVMAP_PAGE_POOLS
                pool = &nvmap_dev->pool;

                /*
                 * Get as many pages from the pools as possible.
                 */
                nvmap_page_pool_lock(pool);
                page_index = __nvmap_page_pool_alloc_lots_locked(pool, pages,
                                                                 nr_page);
                nvmap_page_pool_unlock(pool);
#endif
                for (i = page_index; i < nr_page; i++) {
                        pages[i] = nvmap_alloc_pages_exact(gfp, PAGE_SIZE);
                        if (!pages[i])
                                goto fail;
                }
        }

        /*
         * Make sure any data in the caches is cleaned out before
         * passing these pages to userspace. Otherwise, it can lead to
         * corruption in pages that get mapped as something other than WB
         * in userspace, and to leaked kernel data structures.
         *
         * FIXME: For ARMv7 we don't have __clean_dcache_page() so we continue
         * to use the flush cache version.
         */
#ifdef ARM64
        nvmap_clean_cache(pages, nr_page);
#else
        nvmap_flush_cache(pages, nr_page);
#endif

        h->size = size;
        h->pgalloc.pages = pages;
        h->pgalloc.contig = contiguous;
        atomic_set(&h->pgalloc.ndirty, 0);
        return 0;

fail:
        while (i--)
                __free_page(pages[i]);
        nvmap_altfree(pages, nr_page * sizeof(*pages));
        wmb();
        return -ENOMEM;
}

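/*
 * Try to satisfy an allocation from a single heap type: carveouts go through
 * nvmap_carveout_alloc(), IOVMM requests are backed by system pages via
 * handle_page_alloc(). On success h->alloc is set after a barrier so the
 * rest of the handle state is visible to other CPUs first.
 */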
static void alloc_handle(struct nvmap_client *client,
                         struct nvmap_handle *h, unsigned int type)
{
        unsigned int carveout_mask = NVMAP_HEAP_CARVEOUT_MASK;
        unsigned int iovmm_mask = NVMAP_HEAP_IOVMM;

        BUG_ON(type & (type - 1));

#ifdef CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM
        /* Convert generic carveout requests to iovmm requests. */
        carveout_mask &= ~NVMAP_HEAP_CARVEOUT_GENERIC;
        iovmm_mask |= NVMAP_HEAP_CARVEOUT_GENERIC;
#endif

        if (type & carveout_mask) {
                struct nvmap_heap_block *b;

                b = nvmap_carveout_alloc(client, h, type);
                if (b) {
                        h->heap_pgalloc = false;
                        /* barrier to ensure all handle alloc data
                         * is visible before alloc is seen by other
                         * processors.
                         */
                        mb();
                        h->alloc = true;
                        nvmap_carveout_commit_add(client,
                                nvmap_heap_to_arg(nvmap_block_to_heap(b)),
                                h->size);
                }
        } else if (type & iovmm_mask) {
                int ret;
                size_t reserved = PAGE_ALIGN(h->size);

                atomic_add(reserved, &client->iovm_commit);
                ret = handle_page_alloc(client, h,
                        h->userflags & NVMAP_HANDLE_PHYS_CONTIG);
                if (ret) {
                        atomic_sub(reserved, &client->iovm_commit);
                        return;
                }
                h->heap_pgalloc = true;
                mb();
                h->alloc = true;
        }
}

/* Small allocations will try to allocate from generic OS memory before
 * any of the limited heaps, to increase the effective memory for graphics
 * allocations, and to reduce fragmentation of the graphics heaps with
 * sub-page splinters. */
static const unsigned int heap_policy_small[] = {
        NVMAP_HEAP_CARVEOUT_VPR,
        NVMAP_HEAP_CARVEOUT_IRAM,
        NVMAP_HEAP_CARVEOUT_MASK,
        NVMAP_HEAP_IOVMM,
        0,
};

static const unsigned int heap_policy_large[] = {
        NVMAP_HEAP_CARVEOUT_VPR,
        NVMAP_HEAP_CARVEOUT_IRAM,
        NVMAP_HEAP_IOVMM,
        NVMAP_HEAP_CARVEOUT_MASK,
        0,
};

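/*
 * Allocate backing storage for an already created handle. The requested
 * heap_mask is walked in the order given by the small/large policy tables
 * above; within each policy entry the matching heaps are tried MSB first
 * until one of them succeeds or the mask is exhausted.
 */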
int nvmap_alloc_handle(struct nvmap_client *client,
                       struct nvmap_handle *h, unsigned int heap_mask,
                       size_t align,
                       u8 kind,
                       unsigned int flags)
{
        const unsigned int *alloc_policy;
        int nr_page;
        int err = -ENOMEM;

        h = nvmap_handle_get(h);

        if (!h)
                return -EINVAL;

        if (h->alloc) {
                nvmap_handle_put(h);
                return -EEXIST;
        }

        nvmap_stats_inc(NS_TOTAL, PAGE_ALIGN(h->orig_size));
        nvmap_stats_inc(NS_ALLOC, PAGE_ALIGN(h->size));
        trace_nvmap_alloc_handle(client, h,
                h->size, heap_mask, align, flags,
                nvmap_stats_read(NS_TOTAL),
                nvmap_stats_read(NS_ALLOC));
        h->userflags = flags;
        nr_page = ((h->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
        h->flags = (flags & NVMAP_HANDLE_CACHE_FLAG);
        h->align = max_t(size_t, align, L1_CACHE_BYTES);
        h->kind = kind;

        /* convert iovmm requests to generic carveout. */
        if (heap_mask & NVMAP_HEAP_IOVMM) {
                heap_mask = (heap_mask & ~NVMAP_HEAP_IOVMM) |
                            NVMAP_HEAP_CARVEOUT_GENERIC;
        }

        if (!heap_mask) {
                err = -EINVAL;
                goto out;
        }

        alloc_policy = (nr_page == 1) ? heap_policy_small : heap_policy_large;

        while (!h->alloc && *alloc_policy) {
                unsigned int heap_type;

                heap_type = *alloc_policy++;
                heap_type &= heap_mask;

                if (!heap_type)
                        continue;

                heap_mask &= ~heap_type;

                while (heap_type && !h->alloc) {
                        unsigned int heap;

                        /* iterate possible heaps MSB-to-LSB, since higher-
                         * priority carveouts will have higher usage masks */
                        heap = 1 << __fls(heap_type);
                        alloc_handle(client, h, heap);
                        heap_type &= ~heap;
                }
        }

out:
        if (h->alloc) {
                if (client->kernel_client)
                        nvmap_stats_inc(NS_KALLOC, h->size);
                else
                        nvmap_stats_inc(NS_UALLOC, h->size);
        } else {
                nvmap_stats_dec(NS_TOTAL, PAGE_ALIGN(h->orig_size));
                nvmap_stats_dec(NS_ALLOC, PAGE_ALIGN(h->orig_size));
        }

        err = (h->alloc) ? 0 : err;
        nvmap_handle_put(h);
        return err;
}

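/*
 * Drop one client-local duplicate of a handle. When the last duplicate for
 * this client goes away, the handle_ref is removed from the client's tree,
 * commit accounting is rolled back, any remaining pins are released and the
 * dma_buf reference is dropped; nvmap_handle_put() at the end then drops
 * this caller's reference to the handle itself.
 */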
void nvmap_free_handle(struct nvmap_client *client,
                       struct nvmap_handle *handle)
{
        struct nvmap_handle_ref *ref;
        struct nvmap_handle *h;
        int pins;

        nvmap_ref_lock(client);

        ref = __nvmap_validate_locked(client, handle);
        if (!ref) {
                nvmap_ref_unlock(client);
                return;
        }

        trace_nvmap_free_handle(client, handle);
        BUG_ON(!ref->handle);
        h = ref->handle;

        if (atomic_dec_return(&ref->dupes)) {
                nvmap_ref_unlock(client);
                goto out;
        }

        smp_rmb();
        pins = atomic_read(&ref->pin);
        rb_erase(&ref->node, &client->handle_refs);
        client->handle_count--;
        atomic_dec(&ref->handle->share_count);

        if (h->alloc && h->heap_pgalloc)
                atomic_sub(h->size, &client->iovm_commit);

        if (h->alloc && !h->heap_pgalloc) {
                mutex_lock(&h->lock);
                nvmap_carveout_commit_subtract(client,
                        nvmap_heap_to_arg(nvmap_block_to_heap(h->carveout)),
                        h->size);
                mutex_unlock(&h->lock);
        }

        nvmap_ref_unlock(client);

        if (pins)
                pr_debug("%s freeing pinned handle %p\n",
                            current->group_leader->comm, h);

        while (atomic_read(&ref->pin))
                __nvmap_unpin(ref);

        if (h->owner == client)
                h->owner = NULL;

        dma_buf_put(ref->handle->dmabuf);
        kfree(ref);

out:
        BUG_ON(!atomic_read(&h->ref));
        nvmap_handle_put(h);
}
EXPORT_SYMBOL(nvmap_free_handle);

void nvmap_free_handle_user_id(struct nvmap_client *client,
                               unsigned long user_id)
{
        nvmap_free_handle(client, unmarshal_user_id(user_id));
}

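/*
 * Insert a new handle_ref into the client's rbtree of references (keyed by
 * handle pointer) and update the per-client and global handle counters.
 */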
static void add_handle_ref(struct nvmap_client *client,
                           struct nvmap_handle_ref *ref)
{
        struct rb_node **p, *parent = NULL;

        nvmap_ref_lock(client);
        p = &client->handle_refs.rb_node;
        while (*p) {
                struct nvmap_handle_ref *node;
                parent = *p;
                node = rb_entry(parent, struct nvmap_handle_ref, node);
                if (ref->handle > node->handle)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&ref->node, parent, p);
        rb_insert_color(&ref->node, &client->handle_refs);
        client->handle_count++;
        if (client->handle_count > nvmap_max_handle_count)
                nvmap_max_handle_count = client->handle_count;
        atomic_inc(&ref->handle->share_count);
        nvmap_ref_unlock(client);
}

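/*
 * Create a new, unallocated handle of the given size for a client. The
 * handle is created together with its dma_buf (and a pre-made attachment to
 * the nvmap device), registered with the device, and returned to the caller
 * as the client's first handle_ref.
 */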
struct nvmap_handle_ref *nvmap_create_handle(struct nvmap_client *client,
                                             size_t size)
{
        void *err = ERR_PTR(-ENOMEM);
        struct nvmap_handle *h;
        struct nvmap_handle_ref *ref = NULL;

        if (!client)
                return ERR_PTR(-EINVAL);

        if (!size)
                return ERR_PTR(-EINVAL);

        h = kzalloc(sizeof(*h), GFP_KERNEL);
        if (!h)
                return ERR_PTR(-ENOMEM);

        ref = kzalloc(sizeof(*ref), GFP_KERNEL);
        if (!ref)
                goto ref_alloc_fail;

        atomic_set(&h->ref, 1);
        atomic_set(&h->pin, 0);
        h->owner = client;
        BUG_ON(!h->owner);
        h->size = h->orig_size = size;
        h->flags = NVMAP_HANDLE_WRITE_COMBINE;
        mutex_init(&h->lock);
        INIT_LIST_HEAD(&h->vmas);

        /*
         * This takes out 1 ref on the dmabuf. This corresponds to the
         * handle_ref that gets automatically made by nvmap_create_handle().
         */
        h->dmabuf = __nvmap_make_dmabuf(client, h);
        if (IS_ERR(h->dmabuf)) {
                err = h->dmabuf;
                goto make_dmabuf_fail;
        }

        /*
         * Pre-attach nvmap to this new dmabuf. This gets unattached during the
         * dma_buf_release() operation.
         */
        h->attachment = dma_buf_attach(h->dmabuf, nvmap_dev->dev_user.parent);
        if (IS_ERR(h->attachment)) {
                err = h->attachment;
                goto dma_buf_attach_fail;
        }

        nvmap_handle_add(nvmap_dev, h);

        /*
         * Major assumption here: the dma_buf object that the handle contains
         * is created with a ref count of 1.
         */
        atomic_set(&ref->dupes, 1);
        ref->handle = h;
        atomic_set(&ref->pin, 0);
        add_handle_ref(client, ref);
        trace_nvmap_create_handle(client, client->name, h, size, ref);
        return ref;

dma_buf_attach_fail:
        dma_buf_put(h->dmabuf);
make_dmabuf_fail:
        kfree(ref);
ref_alloc_fail:
        kfree(h);
        return err;
}

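/*
 * Take an additional client-local reference on an existing, allocated
 * handle. If the client already holds a handle_ref the dupes count is
 * simply bumped; otherwise a new handle_ref is created, the client's
 * carveout/IOVMM commit accounting is updated and an extra dma_buf
 * reference is taken.
 */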
struct nvmap_handle_ref *nvmap_duplicate_handle(struct nvmap_client *client,
                                        struct nvmap_handle *h, bool skip_val)
{
        struct nvmap_handle_ref *ref = NULL;

        BUG_ON(!client);
        /* on success, the reference count for the handle should be
         * incremented, so the success paths will not call nvmap_handle_put */
        h = nvmap_validate_get(h);

        if (!h) {
                pr_debug("%s duplicate handle failed\n",
                            current->group_leader->comm);
                return ERR_PTR(-EPERM);
        }

        if (!h->alloc) {
                pr_err("%s duplicating unallocated handle\n",
                        current->group_leader->comm);
                nvmap_handle_put(h);
                return ERR_PTR(-EINVAL);
        }

        nvmap_ref_lock(client);
        ref = __nvmap_validate_locked(client, h);

        if (ref) {
                /* handle already duplicated in client; just increment
                 * the reference count rather than re-duplicating it */
                atomic_inc(&ref->dupes);
                nvmap_ref_unlock(client);
                return ref;
        }

        nvmap_ref_unlock(client);

        ref = kzalloc(sizeof(*ref), GFP_KERNEL);
        if (!ref) {
                nvmap_handle_put(h);
                return ERR_PTR(-ENOMEM);
        }

        if (!h->heap_pgalloc) {
                mutex_lock(&h->lock);
                nvmap_carveout_commit_add(client,
                        nvmap_heap_to_arg(nvmap_block_to_heap(h->carveout)),
                        h->size);
                mutex_unlock(&h->lock);
        } else {
                atomic_add(h->size, &client->iovm_commit);
        }

        atomic_set(&ref->dupes, 1);
        ref->handle = h;
        atomic_set(&ref->pin, 0);
        add_handle_ref(client, ref);

        /*
         * Ref counting on the dma_bufs follows the creation and destruction
         * of nvmap_handle_refs. That is, every time a handle_ref is made the
         * dma_buf ref count goes up, and every time a handle_ref is destroyed
         * the dma_buf ref count goes down.
         */
        get_dma_buf(h->dmabuf);

        trace_nvmap_duplicate_handle(client, h, ref);
        return ref;
}

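/*
 * Look up the nvmap handle behind a dma_buf fd and duplicate it into the
 * calling client, returning the resulting handle_ref.
 */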
struct nvmap_handle_ref *nvmap_create_handle_from_fd(
                        struct nvmap_client *client, int fd)
{
        struct nvmap_handle *handle;
        struct nvmap_handle_ref *ref;

        BUG_ON(!client);

        handle = nvmap_get_id_from_dmabuf_fd(client, fd);
        if (IS_ERR(handle))
                return ERR_CAST(handle);
        ref = nvmap_duplicate_handle(client, handle, 1);
        return ref;
}

struct nvmap_handle *nvmap_duplicate_handle_id_ex(struct nvmap_client *client,
                                                        struct nvmap_handle *h)
{
        struct nvmap_handle_ref *ref = nvmap_duplicate_handle(client, h, 0);

        if (IS_ERR(ref))
                return NULL;

        return __nvmap_ref_to_id(ref);
}
EXPORT_SYMBOL(nvmap_duplicate_handle_id_ex);

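/*
 * Report the size, cache flags, page count and contiguity of a page-backed
 * handle. Fails for handles that are unallocated or carveout-backed.
 */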
int nvmap_get_page_list_info(struct nvmap_client *client,
                                struct nvmap_handle *handle, u32 *size,
                                u32 *flags, u32 *nr_page, bool *contig)
{
        struct nvmap_handle *h;

        BUG_ON(!size || !flags || !nr_page || !contig);
        BUG_ON(!client);

        *size = 0;
        *flags = 0;
        *nr_page = 0;

        h = nvmap_handle_get(handle);

        if (!h) {
                pr_err("%s query invalid handle %p\n",
                        current->group_leader->comm, handle);
                return -EINVAL;
        }

        if (!h->alloc || !h->heap_pgalloc) {
                pr_err("%s query unallocated handle %p\n",
                        current->group_leader->comm, handle);
                nvmap_handle_put(h);
                return -EINVAL;
        }

        *flags = h->flags;
        *size = h->orig_size;
        *nr_page = PAGE_ALIGN(h->size) >> PAGE_SHIFT;
        *contig = h->pgalloc.contig;

        nvmap_handle_put(h);
        return 0;
}
EXPORT_SYMBOL(nvmap_get_page_list_info);

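/*
 * Copy the page pointers of a page-backed handle into a caller-supplied
 * array and pin the handle on behalf of the client so that the pages stay
 * in place while the caller uses them.
 */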
int nvmap_acquire_page_list(struct nvmap_client *client,
                        struct nvmap_handle *handle, struct page **pages,
                        u32 nr_page)
{
        struct nvmap_handle *h;
        struct nvmap_handle_ref *ref;
        int idx;
        phys_addr_t dummy;

        BUG_ON(!client);

        h = nvmap_handle_get(handle);

        if (!h) {
                pr_err("%s query invalid handle %p\n",
                          current->group_leader->comm, handle);
                return -EINVAL;
        }

        if (!h->alloc || !h->heap_pgalloc) {
                pr_err("%s query unallocated handle %p\n",
                          current->group_leader->comm, handle);
                nvmap_handle_put(h);
                return -EINVAL;
        }

        BUG_ON(nr_page != PAGE_ALIGN(h->size) >> PAGE_SHIFT);

        for (idx = 0; idx < nr_page; idx++)
                pages[idx] = h->pgalloc.pages[idx];

        nvmap_ref_lock(client);
        ref = __nvmap_validate_locked(client, h);
        if (ref)
                __nvmap_pin(ref, &dummy);
        nvmap_ref_unlock(client);

        return 0;
}
EXPORT_SYMBOL(nvmap_acquire_page_list);

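/*
 * Undo nvmap_acquire_page_list(): unpin the handle for this client and drop
 * the reference taken when the page list was acquired.
 */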
int nvmap_release_page_list(struct nvmap_client *client,
                                struct nvmap_handle *handle)
{
        struct nvmap_handle_ref *ref;
        struct nvmap_handle *h = NULL;

        BUG_ON(!client);

        nvmap_ref_lock(client);

        ref = __nvmap_validate_locked(client, handle);
        if (ref)
                __nvmap_unpin(ref);

        nvmap_ref_unlock(client);

        if (ref)
                h = ref->handle;
        if (h)
                nvmap_handle_put(h);

        return 0;
}
EXPORT_SYMBOL(nvmap_release_page_list);

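/*
 * Read one NVMAP_HANDLE_PARAM_* attribute of a handle into *result. The
 * BASE query requires the handle to be allocated and pinned; HEAP reports
 * either the carveout usage mask or NVMAP_HEAP_IOVMM for page-backed
 * handles.
 */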
int __nvmap_get_handle_param(struct nvmap_client *client,
                             struct nvmap_handle *h, u32 param, u64 *result)
{
        int err = 0;

        if (WARN_ON(!virt_addr_valid(h)))
                return -EINVAL;

        switch (param) {
        case NVMAP_HANDLE_PARAM_SIZE:
                *result = h->orig_size;
                break;
        case NVMAP_HANDLE_PARAM_ALIGNMENT:
                *result = h->align;
                break;
        case NVMAP_HANDLE_PARAM_BASE:
                if (!h->alloc || !atomic_read(&h->pin))
                        *result = -EINVAL;
                else if (!h->heap_pgalloc) {
                        mutex_lock(&h->lock);
                        *result = h->carveout->base;
                        mutex_unlock(&h->lock);
                } else if (h->attachment->priv)
                        *result = sg_dma_address(
                                ((struct sg_table *)h->attachment->priv)->sgl);
                else
                        *result = -EINVAL;
                break;
        case NVMAP_HANDLE_PARAM_HEAP:
                if (!h->alloc)
                        *result = 0;
                else if (!h->heap_pgalloc) {
                        mutex_lock(&h->lock);
                        *result = nvmap_carveout_usage(client, h->carveout);
                        mutex_unlock(&h->lock);
                } else
                        *result = NVMAP_HEAP_IOVMM;
                break;
        case NVMAP_HANDLE_PARAM_KIND:
                *result = h->kind;
                break;
        case NVMAP_HANDLE_PARAM_COMPR:
                /* ignored, to be removed */
                break;
        default:
                err = -EINVAL;
                break;
        }
        return err;
}

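/*
 * Validated wrapper around __nvmap_get_handle_param() that takes a
 * handle_ref instead of a bare handle.
 */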
int nvmap_get_handle_param(struct nvmap_client *client,
                           struct nvmap_handle_ref *ref, u32 param, u64 *result)
{
        if (WARN_ON(!virt_addr_valid(ref)) ||
            WARN_ON(!virt_addr_valid(client)) ||
            WARN_ON(!result))
                return -EINVAL;

        return __nvmap_get_handle_param(client, ref->handle, param, result);
}