/*
 * drivers/video/tegra/nvmap/nvmap_handle.c
 *
 * Handle allocation and freeing routines for nvmap
 *
 * Copyright (c) 2009-2014, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#define pr_fmt(fmt)     "%s: " fmt, __func__

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/shrinker.h>
#include <linux/moduleparam.h>
#include <linux/nvmap.h>

#include <asm/cacheflush.h>
#include <asm/outercache.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>

#include <mach/iovmm.h>
#include <trace/events/nvmap.h>

#include "nvmap.h"
#include "nvmap_mru.h"
#include "nvmap_common.h"

#define NVMAP_SECURE_HEAPS      (NVMAP_HEAP_CARVEOUT_IRAM | NVMAP_HEAP_IOVMM | \
                                 NVMAP_HEAP_CARVEOUT_VPR)
#ifdef CONFIG_NVMAP_HIGHMEM_ONLY
#define GFP_NVMAP               (__GFP_HIGHMEM | __GFP_NOWARN)
#else
#define GFP_NVMAP               (GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN)
#endif
/* Handles may be arbitrarily large (16+ MiB), and any handle allocated from
 * the kernel (i.e., not a carveout handle) includes its array of pages. To
 * conserve kmalloc space, if the array of pages exceeds PAGELIST_VMALLOC_MIN
 * bytes, the array is allocated using vmalloc. */
#define PAGELIST_VMALLOC_MIN    (PAGE_SIZE)

#ifdef CONFIG_NVMAP_PAGE_POOLS

#define NVMAP_TEST_PAGE_POOL_SHRINKER 1
static bool enable_pp = true;
static int pool_size[NVMAP_NUM_POOLS];

static char *s_memtype_str[] = {
        "uc",
        "wc",
        "iwb",
        "wb",
};

static inline void nvmap_page_pool_lock(struct nvmap_page_pool *pool)
{
        mutex_lock(&pool->lock);
}

static inline void nvmap_page_pool_unlock(struct nvmap_page_pool *pool)
{
        mutex_unlock(&pool->lock);
}

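/*
 * Pop one page from the pool; the caller must hold pool->lock. Pages cached
 * in the pool carry an extra reference, which is dropped here so the page
 * leaves the pool with a refcount of exactly one.
 */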
static struct page *nvmap_page_pool_alloc_locked(struct nvmap_page_pool *pool)
{
        struct page *page = NULL;

        if (pool->npages > 0) {
                page = pool->page_array[--pool->npages];
                atomic_dec(&page->_count);
                BUG_ON(atomic_read(&page->_count) != 1);
        }
        return page;
}

static struct page *nvmap_page_pool_alloc(struct nvmap_page_pool *pool)
{
        struct page *page = NULL;

        if (pool) {
                nvmap_page_pool_lock(pool);
                page = nvmap_page_pool_alloc_locked(pool);
                nvmap_page_pool_unlock(pool);
        }
        return page;
}

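/*
 * Try to stash a page in the pool; the caller must hold pool->lock. The page
 * must arrive with a refcount of one; an extra reference is taken while it
 * sits in the pool. Returns true if the page was accepted, false if pooling
 * is disabled or the pool is already full.
 */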
static bool nvmap_page_pool_release_locked(struct nvmap_page_pool *pool,
                                            struct page *page)
{
        bool ret = false;

        BUG_ON(atomic_read(&page->_count) != 1);
        if (enable_pp && pool->npages < pool->max_pages) {
                atomic_inc(&page->_count);
                pool->page_array[pool->npages++] = page;
                ret = true;
        }
        return ret;
}

static bool nvmap_page_pool_release(struct nvmap_page_pool *pool,
                                          struct page *page)
{
        bool ret = false;

        if (pool) {
                nvmap_page_pool_lock(pool);
                ret = nvmap_page_pool_release_locked(pool, page);
                nvmap_page_pool_unlock(pool);
        }
        return ret;
}

static int nvmap_page_pool_get_available_count(struct nvmap_page_pool *pool)
{
        return pool->npages;
}

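/*
 * Return up to nr_free pages from the pool to the kernel page allocator,
 * restoring the write-back attribute before freeing. Returns the number of
 * requested pages that could not be reclaimed from the pool.
 */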
static int nvmap_page_pool_free(struct nvmap_page_pool *pool, int nr_free)
{
        int err;
        int i = nr_free;
        int idx = 0;
        struct page *page;

        if (!nr_free)
                return nr_free;
        nvmap_page_pool_lock(pool);
        while (i) {
                page = nvmap_page_pool_alloc_locked(pool);
                if (!page)
                        break;
                pool->shrink_array[idx++] = page;
                i--;
        }

        if (idx) {
                /* This op should never fail. */
                err = set_pages_array_wb(pool->shrink_array, idx);
                BUG_ON(err);
        }

        while (idx--)
                __free_page(pool->shrink_array[idx]);
        nvmap_page_pool_unlock(pool);
        return i;
}

static int nvmap_page_pool_get_unused_pages(void)
{
        unsigned int i;
        int total = 0;
        struct nvmap_share *share = nvmap_get_share_from_dev(nvmap_dev);

        for (i = 0; i < NVMAP_NUM_POOLS; i++)
                total += nvmap_page_pool_get_available_count(&share->pools[i]);

        return total;
}

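/*
 * Resize a pool to hold at most 'size' pages: excess pages are freed back to
 * the kernel first, then the page_array/shrink_array bookkeeping arrays are
 * reallocated (or dropped entirely when the new size is zero).
 */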
static void nvmap_page_pool_resize(struct nvmap_page_pool *pool, int size)
{
        int available_pages;
        int pages_to_release = 0;
        struct page **page_array = NULL;
        struct page **shrink_array = NULL;

        if (size == pool->max_pages)
                return;
repeat:
        nvmap_page_pool_free(pool, pages_to_release);
        nvmap_page_pool_lock(pool);
        available_pages = nvmap_page_pool_get_available_count(pool);
        if (available_pages > size) {
                nvmap_page_pool_unlock(pool);
                pages_to_release = available_pages - size;
                goto repeat;
        }

        if (size == 0) {
                vfree(pool->page_array);
                vfree(pool->shrink_array);
                pool->page_array = pool->shrink_array = NULL;
                goto out;
        }

        page_array = vmalloc(sizeof(struct page *) * size);
        shrink_array = vmalloc(sizeof(struct page *) * size);
        if (!page_array || !shrink_array)
                goto fail;

        memcpy(page_array, pool->page_array,
                pool->npages * sizeof(struct page *));
        vfree(pool->page_array);
        vfree(pool->shrink_array);
        pool->page_array = page_array;
        pool->shrink_array = shrink_array;
out:
        pr_debug("%s pool resized to %d from %d pages\n",
                s_memtype_str[pool->flags], size, pool->max_pages);
        pool->max_pages = size;
        goto exit;
fail:
        vfree(page_array);
        vfree(shrink_array);
        pr_err("failed to allocate new pool arrays, pool size unchanged\n");
exit:
        nvmap_page_pool_unlock(pool);
}

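/*
 * Shrinker callback. With nr_to_scan == 0 this only reports how many pooled
 * pages could be released; otherwise it frees pages round-robin across the
 * pools, starting after the pool used on the previous invocation.
 */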
static int nvmap_page_pool_shrink(struct shrinker *shrinker,
                                  struct shrink_control *sc)
{
        unsigned int i;
        unsigned int pool_offset;
        struct nvmap_page_pool *pool;
        int shrink_pages = sc->nr_to_scan;
        static atomic_t start_pool = ATOMIC_INIT(-1);
        struct nvmap_share *share = nvmap_get_share_from_dev(nvmap_dev);

        if (!shrink_pages)
                goto out;

        pr_debug("sh_pages=%d\n", shrink_pages);

        for (i = 0; i < NVMAP_NUM_POOLS && shrink_pages; i++) {
                pool_offset = atomic_add_return(1, &start_pool) %
                                NVMAP_NUM_POOLS;
                pool = &share->pools[pool_offset];
                shrink_pages = nvmap_page_pool_free(pool, shrink_pages);
        }
out:
        return nvmap_page_pool_get_unused_pages();
}

static struct shrinker nvmap_page_pool_shrinker = {
        .shrink = nvmap_page_pool_shrink,
        .seeks = 1,
};

static void shrink_page_pools(int *total_pages, int *available_pages)
{
        struct shrink_control sc;

        sc.gfp_mask = GFP_KERNEL;
        sc.nr_to_scan = 0;
        *total_pages = nvmap_page_pool_shrink(NULL, &sc);
        sc.nr_to_scan = *total_pages * 2;
        *available_pages = nvmap_page_pool_shrink(NULL, &sc);
}

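/*
 * Test hook: writing a truthy value to the shrink_page_pools module parameter
 * empties the pools through the shrinker path and logs how long it took, e.g.
 * (the exact sysfs directory depends on how nvmap is built into the kernel):
 *
 *   echo 1 > /sys/module/<nvmap module>/parameters/shrink_page_pools
 */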
#if NVMAP_TEST_PAGE_POOL_SHRINKER
static bool shrink_pp;
static int shrink_set(const char *arg, const struct kernel_param *kp)
{
        int cpu = smp_processor_id();
        unsigned long long t1, t2;
        int total_pages, available_pages;

        param_set_bool(arg, kp);

        if (shrink_pp) {
                t1 = cpu_clock(cpu);
                shrink_page_pools(&total_pages, &available_pages);
                t2 = cpu_clock(cpu);
                pr_info("shrink page pools: time=%lldns, "
                        "total_pages_released=%d, free_pages_available=%d\n",
                        t2 - t1, total_pages, available_pages);
        }
        return 0;
}

static int shrink_get(char *buff, const struct kernel_param *kp)
{
        return param_get_bool(buff, kp);
}

static struct kernel_param_ops shrink_ops = {
        .get = shrink_get,
        .set = shrink_set,
};

module_param_cb(shrink_page_pools, &shrink_ops, &shrink_pp, 0644);
#endif

static int enable_pp_set(const char *arg, const struct kernel_param *kp)
{
        int total_pages, available_pages;

        param_set_bool(arg, kp);

        if (!enable_pp) {
                shrink_page_pools(&total_pages, &available_pages);
                pr_info("disabled page pools and released pages, "
                        "total_pages_released=%d, free_pages_available=%d\n",
                        total_pages, available_pages);
        }
        return 0;
}

static int enable_pp_get(char *buff, const struct kernel_param *kp)
{
        return param_get_bool(buff, kp);
}

static struct kernel_param_ops enable_pp_ops = {
        .get = enable_pp_get,
        .set = enable_pp_set,
};

module_param_cb(enable_page_pools, &enable_pp_ops, &enable_pp, 0644);

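/*
 * The POOL_SIZE_* macros below stamp out per-pool module parameters
 * (uc_pool_size, wc_pool_size, iwb_pool_size, wb_pool_size). Writing a value
 * to one of them resizes the corresponding page pool at runtime via
 * nvmap_page_pool_resize().
 */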
#define POOL_SIZE_SET(m, i) \
static int pool_size_##m##_set(const char *arg, const struct kernel_param *kp) \
{ \
        struct nvmap_share *share = nvmap_get_share_from_dev(nvmap_dev); \
        param_set_int(arg, kp); \
        nvmap_page_pool_resize(&share->pools[i], pool_size[i]); \
        return 0; \
}

#define POOL_SIZE_GET(m) \
static int pool_size_##m##_get(char *buff, const struct kernel_param *kp) \
{ \
        return param_get_int(buff, kp); \
}

#define POOL_SIZE_OPS(m) \
static struct kernel_param_ops pool_size_##m##_ops = { \
        .get = pool_size_##m##_get, \
        .set = pool_size_##m##_set, \
};

#define POOL_SIZE_MODULE_PARAM_CB(m, i) \
module_param_cb(m##_pool_size, &pool_size_##m##_ops, &pool_size[i], 0644)

POOL_SIZE_SET(uc, NVMAP_HANDLE_UNCACHEABLE);
POOL_SIZE_GET(uc);
POOL_SIZE_OPS(uc);
POOL_SIZE_MODULE_PARAM_CB(uc, NVMAP_HANDLE_UNCACHEABLE);

POOL_SIZE_SET(wc, NVMAP_HANDLE_WRITE_COMBINE);
POOL_SIZE_GET(wc);
POOL_SIZE_OPS(wc);
POOL_SIZE_MODULE_PARAM_CB(wc, NVMAP_HANDLE_WRITE_COMBINE);

POOL_SIZE_SET(iwb, NVMAP_HANDLE_INNER_CACHEABLE);
POOL_SIZE_GET(iwb);
POOL_SIZE_OPS(iwb);
POOL_SIZE_MODULE_PARAM_CB(iwb, NVMAP_HANDLE_INNER_CACHEABLE);

POOL_SIZE_SET(wb, NVMAP_HANDLE_CACHEABLE);
POOL_SIZE_GET(wb);
POOL_SIZE_OPS(wb);
POOL_SIZE_MODULE_PARAM_CB(wb, NVMAP_HANDLE_CACHEABLE);

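/*
 * Initialize one page pool for the given memory-type flags: size it from
 * CONFIG_NVMAP_PAGE_POOL_SIZE or 1/8th of total RAM, allocate its bookkeeping
 * arrays, register the shrinker once globally, prefill the pool with pages,
 * and switch those pages to the pool's cache attribute. No pool is created
 * for fully cacheable (wb) memory. Returns 0 on success, -ENOMEM on failure.
 */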
int nvmap_page_pool_init(struct nvmap_page_pool *pool, int flags)
{
        int i;
        int err;
        struct page *page;
        static int reg = 1;
        struct sysinfo info;
        int highmem_pages = 0;
        typedef int (*set_pages_array) (struct page **pages, int addrinarray);
        set_pages_array s_cpa[] = {
                set_pages_array_uc,
                set_pages_array_wc,
                set_pages_array_iwb,
                set_pages_array_wb
        };

        BUG_ON(flags >= NVMAP_NUM_POOLS);
        memset(pool, 0x0, sizeof(*pool));
        mutex_init(&pool->lock);
        pool->flags = flags;

        /* No default pool for cached memory. */
        if (flags == NVMAP_HANDLE_CACHEABLE)
                return 0;

        si_meminfo(&info);
        if (!pool_size[flags] && !CONFIG_NVMAP_PAGE_POOL_SIZE)
                /* Use 3/8th of total RAM for page pools:
                 * 1/8th for uc, 1/8th for wc and 1/8th for iwb.
                 */
                pool->max_pages = info.totalram >> 3;
        else
                pool->max_pages = CONFIG_NVMAP_PAGE_POOL_SIZE;

        if (pool->max_pages <= 0 || pool->max_pages >= info.totalram)
                goto fail;
        pool_size[flags] = pool->max_pages;
        pr_info("nvmap %s page pool size=%d pages\n",
                s_memtype_str[flags], pool->max_pages);
        pool->page_array = vmalloc(sizeof(void *) * pool->max_pages);
        pool->shrink_array = vmalloc(sizeof(struct page *) * pool->max_pages);
        if (!pool->page_array || !pool->shrink_array)
                goto fail;

        if (reg) {
                reg = 0;
                register_shrinker(&nvmap_page_pool_shrinker);
        }

        nvmap_page_pool_lock(pool);
        for (i = 0; i < pool->max_pages; i++) {
                page = alloc_page(GFP_NVMAP);
                if (!page)
                        goto do_cpa;
                if (!nvmap_page_pool_release_locked(pool, page)) {
                        __free_page(page);
                        goto do_cpa;
                }
                if (PageHighMem(page))
                        highmem_pages++;
        }
        si_meminfo(&info);
        pr_info("nvmap pool = %s, highmem=%d, pool_size=%d, "
                "totalram=%lu, freeram=%lu, totalhigh=%lu, freehigh=%lu\n",
                s_memtype_str[flags], highmem_pages, pool->max_pages,
                info.totalram, info.freeram, info.totalhigh, info.freehigh);
do_cpa:
        err = (*s_cpa[flags])(pool->page_array, pool->npages);
        BUG_ON(err);
        nvmap_page_pool_unlock(pool);
        return 0;
fail:
        pool->max_pages = 0;
        vfree(pool->shrink_array);
        vfree(pool->page_array);
        return -ENOMEM;
}
#endif

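/*
 * altalloc()/altfree() pick between kmalloc and vmalloc for the per-handle
 * page arrays: small arrays (up to PAGELIST_VMALLOC_MIN bytes) come from
 * kmalloc, larger ones from vmalloc, as described above.
 */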
static inline void *altalloc(size_t len)
{
        if (len > PAGELIST_VMALLOC_MIN)
                return vmalloc(len);
        else
                return kmalloc(len, GFP_KERNEL);
}

static inline void altfree(void *ptr, size_t len)
{
        if (!ptr)
                return;

        if (len > PAGELIST_VMALLOC_MIN)
                vfree(ptr);
        else
                kfree(ptr);
}

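/*
 * Final teardown of a handle once its last reference is gone: carveout
 * handles are returned to their heap; page-allocated handles try to give
 * their pages back to the page pools, restore the write-back attribute on
 * whatever could not be pooled, release any IOVMM area, and free the pages
 * and the page array.
 */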
void _nvmap_handle_free(struct nvmap_handle *h)
{
        int err;
        struct nvmap_share *share = nvmap_get_share_from_dev(h->dev);
        unsigned int i, nr_page, page_index = 0;
#ifdef CONFIG_NVMAP_PAGE_POOLS
        struct nvmap_page_pool *pool = NULL;
#endif

        if (nvmap_handle_remove(h->dev, h) != 0)
                return;

        if (!h->alloc)
                goto out;

        if (!h->heap_pgalloc) {
                nvmap_usecount_inc(h);
                nvmap_heap_free(h->carveout);
                goto out;
        }

        nr_page = DIV_ROUND_UP(h->size, PAGE_SIZE);

        BUG_ON(h->size & ~PAGE_MASK);
        BUG_ON(!h->pgalloc.pages);

        nvmap_mru_remove(share, h);

#ifdef CONFIG_NVMAP_PAGE_POOLS
        if (h->flags < NVMAP_NUM_POOLS)
                pool = &share->pools[h->flags];

        while (page_index < nr_page) {
                if (!nvmap_page_pool_release(pool,
                    h->pgalloc.pages[page_index]))
                        break;
                page_index++;
        }
#endif

        if (page_index == nr_page)
                goto skip_attr_restore;

        /* Restore page attributes. */
        if (h->flags == NVMAP_HANDLE_WRITE_COMBINE ||
            h->flags == NVMAP_HANDLE_UNCACHEABLE ||
            h->flags == NVMAP_HANDLE_INNER_CACHEABLE) {
                /* This op should never fail. */
                err = set_pages_array_wb(&h->pgalloc.pages[page_index],
                                nr_page - page_index);
                BUG_ON(err);
        }

skip_attr_restore:
        if (h->pgalloc.area)
                tegra_iovmm_free_vm(h->pgalloc.area);

        for (i = page_index; i < nr_page; i++)
                __free_page(h->pgalloc.pages[i]);

        altfree(h->pgalloc.pages, nr_page * sizeof(struct page *));

out:
        kfree(h);
}

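/*
 * Allocate exactly size bytes of physically contiguous pages: a higher-order
 * allocation is split into individual pages and the unused tail pages are
 * freed immediately.
 */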
static struct page *nvmap_alloc_pages_exact(gfp_t gfp, size_t size)
{
        struct page *page, *p, *e;
        unsigned int order;

        size = PAGE_ALIGN(size);
        order = get_order(size);
        page = alloc_pages(gfp, order);

        if (!page)
                return NULL;

        split_page(page, order);
        e = page + (1 << order);
        for (p = page + (size >> PAGE_SHIFT); p < e; p++)
                __free_page(p);

        return page;
}

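/*
 * Back a handle with system pages. Contiguous requests use one exact
 * allocation; non-contiguous requests are satisfied from the page pools
 * first (zeroing pooled pages by hand when NVMAP_HANDLE_ZEROED_PAGES is
 * set, since pooled pages bypass __GFP_ZERO) and fall back to the page
 * allocator for the remainder. Pages not taken from a pool have their
 * kernel mapping attribute switched to match the handle's cache flags.
 */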
static int handle_page_alloc(struct nvmap_client *client,
                             struct nvmap_handle *h, bool contiguous)
{
        int err = 0;
        size_t size = PAGE_ALIGN(h->size);
        unsigned int nr_page = size >> PAGE_SHIFT;
        pgprot_t prot;
        unsigned int i = 0, page_index = 0;
        struct page **pages;
#ifdef CONFIG_NVMAP_PAGE_POOLS
        struct nvmap_page_pool *pool = NULL;
        struct nvmap_share *share = nvmap_get_share_from_dev(h->dev);
#endif
        gfp_t gfp = GFP_NVMAP;
        unsigned long kaddr, paddr;
        pte_t **pte = NULL;

        if (h->userflags & NVMAP_HANDLE_ZEROED_PAGES) {
                gfp |= __GFP_ZERO;
                prot = nvmap_pgprot(h, pgprot_kernel);
                pte = nvmap_alloc_pte(client->dev, (void **)&kaddr);
                if (IS_ERR(pte))
                        return -ENOMEM;
        }

        pages = altalloc(nr_page * sizeof(*pages));
        if (!pages)
                return -ENOMEM;

        prot = nvmap_pgprot(h, pgprot_kernel);

        h->pgalloc.area = NULL;
        if (contiguous) {
                struct page *page;
                page = nvmap_alloc_pages_exact(gfp, size);
                if (!page)
                        goto fail;

                for (i = 0; i < nr_page; i++)
                        pages[i] = nth_page(page, i);

        } else {
#ifdef CONFIG_NVMAP_PAGE_POOLS
                if (h->flags < NVMAP_NUM_POOLS)
                        pool = &share->pools[h->flags];

                for (i = 0; i < nr_page; i++) {
                        /* Get pages from pool, if available. */
                        pages[i] = nvmap_page_pool_alloc(pool);
                        if (!pages[i])
                                break;
                        if (h->userflags & NVMAP_HANDLE_ZEROED_PAGES) {
                                /*
                                 * Just memset lowmem pages; they are
                                 * guaranteed to have a kernel virtual
                                 * address. Otherwise, build a temporary
                                 * kernel mapping for the page and zero it
                                 * through that.
                                 */
                                if (!PageHighMem(pages[i])) {
                                        memset(page_address(pages[i]), 0,
                                               PAGE_SIZE);
                                } else {
                                        paddr = page_to_phys(pages[i]);
                                        set_pte_at(&init_mm, kaddr, *pte,
                                                   pfn_pte(__phys_to_pfn(paddr),
                                                           prot));
                                        flush_tlb_kernel_page(kaddr);
                                        memset((char *)kaddr, 0, PAGE_SIZE);
                                }
                        }
                        page_index++;
                }
#endif
                for (; i < nr_page; i++) {
                        pages[i] = nvmap_alloc_pages_exact(gfp, PAGE_SIZE);
                        if (!pages[i])
                                goto fail;
                }

#ifndef CONFIG_NVMAP_RECLAIM_UNPINNED_VM
                h->pgalloc.area = tegra_iovmm_create_vm(client->share->iovmm,
                                        NULL, size, h->align, prot,
                                        h->pgalloc.iovm_addr);
                if (!h->pgalloc.area)
                        goto fail;

                h->pgalloc.dirty = true;
#endif
        }

        if (nr_page == page_index)
                goto skip_attr_change;

        /* Update the pages' mapping attributes in the kernel page table. */
        if (h->flags == NVMAP_HANDLE_WRITE_COMBINE)
                err = set_pages_array_wc(&pages[page_index],
                                        nr_page - page_index);
        else if (h->flags == NVMAP_HANDLE_UNCACHEABLE)
                err = set_pages_array_uc(&pages[page_index],
                                        nr_page - page_index);
        else if (h->flags == NVMAP_HANDLE_INNER_CACHEABLE)
                err = set_pages_array_iwb(&pages[page_index],
                                        nr_page - page_index);

        if (err)
                goto fail;

skip_attr_change:
        if (h->userflags & NVMAP_HANDLE_ZEROED_PAGES)
                nvmap_free_pte(client->dev, pte);
        h->size = size;
        h->pgalloc.pages = pages;
        h->pgalloc.contig = contiguous;
        INIT_LIST_HEAD(&h->pgalloc.mru_list);
        return 0;

fail:
        if (h->userflags & NVMAP_HANDLE_ZEROED_PAGES)
                nvmap_free_pte(client->dev, pte);
        err = set_pages_array_wb(pages, i);
        BUG_ON(err);
        while (i--)
                __free_page(pages[i]);
        altfree(pages, nr_page * sizeof(*pages));
        wmb();
        return -ENOMEM;
}

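/*
 * Attempt an allocation from a single heap type (exactly one bit of 'type'
 * may be set): carveout heaps go through nvmap_carveout_alloc(), IOVMM
 * requests are charged against the client's IOVM commit limit before the
 * pages are allocated, and NVMAP_HEAP_SYSMEM uses a contiguous page
 * allocation. On success h->alloc is set.
 */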
static void alloc_handle(struct nvmap_client *client,
                         struct nvmap_handle *h, unsigned int type)
{
        unsigned int carveout_mask = NVMAP_HEAP_CARVEOUT_MASK;
        unsigned int iovmm_mask = NVMAP_HEAP_IOVMM;

        BUG_ON(type & (type - 1));

#ifdef CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM
        /* Convert generic carveout requests to iovmm requests. */
        carveout_mask &= ~NVMAP_HEAP_CARVEOUT_GENERIC;
        iovmm_mask |= NVMAP_HEAP_CARVEOUT_GENERIC;
#endif

        if (type & carveout_mask) {
                struct nvmap_heap_block *b;
                /* Protect handle from relocation. */
                nvmap_usecount_inc(h);

                b = nvmap_carveout_alloc(client, h, type);
                if (b) {
                        h->heap_pgalloc = false;
                        h->alloc = true;
                        nvmap_carveout_commit_add(client,
                                nvmap_heap_to_arg(nvmap_block_to_heap(b)),
                                h->size);
                }
                nvmap_usecount_dec(h);

        } else if (type & iovmm_mask) {
                size_t reserved = PAGE_ALIGN(h->size);
                int commit = 0;
                int ret;

                /* Increment the committed IOVM space prior to allocation
                 * to avoid race conditions with other threads allocating
                 * simultaneously. */
                commit = atomic_add_return(reserved,
                                            &client->iovm_commit);

                if (commit < client->iovm_limit)
                        ret = handle_page_alloc(client, h, false);
                else
                        ret = -ENOMEM;

                if (!ret) {
                        h->heap_pgalloc = true;
                        h->alloc = true;
                } else {
                        atomic_sub(reserved, &client->iovm_commit);
                }

        } else if (type & NVMAP_HEAP_SYSMEM) {
                if (handle_page_alloc(client, h, true) == 0) {
                        BUG_ON(!h->pgalloc.contig);
                        h->heap_pgalloc = true;
                        h->alloc = true;
                }
        }
}

/* Small allocations are attempted from generic OS memory before any of the
 * limited heaps, to increase the effective memory available for graphics
 * allocations and to reduce fragmentation of the graphics heaps with
 * sub-page splinters. */
static const unsigned int heap_policy_small[] = {
        NVMAP_HEAP_CARVEOUT_VPR,
        NVMAP_HEAP_CARVEOUT_IRAM,
#ifdef CONFIG_NVMAP_ALLOW_SYSMEM
        NVMAP_HEAP_SYSMEM,
#endif
        NVMAP_HEAP_CARVEOUT_MASK,
        NVMAP_HEAP_IOVMM,
        0,
};

static const unsigned int heap_policy_large[] = {
        NVMAP_HEAP_CARVEOUT_VPR,
        NVMAP_HEAP_CARVEOUT_IRAM,
        NVMAP_HEAP_IOVMM,
        NVMAP_HEAP_CARVEOUT_MASK,
#ifdef CONFIG_NVMAP_ALLOW_SYSMEM
        NVMAP_HEAP_SYSMEM,
#endif
        0,
};

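/*
 * Allocate backing storage for an existing, unallocated handle. The heap
 * mask is first adjusted for the kernel configuration (IOVMM support,
 * sysmem policy, alignment limits, secure heaps), then the heaps are tried
 * in policy order (heap_policy_small or heap_policy_large) until one of
 * them succeeds. Returns 0 on success, -EINVAL for a bad handle or empty
 * heap mask, otherwise -ENOMEM.
 */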
int nvmap_alloc_handle_id(struct nvmap_client *client,
                          unsigned long id, unsigned int heap_mask,
                          size_t align, unsigned int flags)
{
        struct nvmap_handle *h = NULL;
        const unsigned int *alloc_policy;
        int nr_page;
        int err = -ENOMEM;

        h = nvmap_get_handle_id(client, id);

        if (!h)
                return -EINVAL;

        if (h->alloc)
                goto out;

        trace_nvmap_alloc_handle_id(client, id, heap_mask, align, flags);
        h->userflags = flags;
        nr_page = ((h->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
        h->secure = !!(flags & NVMAP_HANDLE_SECURE);
        h->flags = (flags & NVMAP_HANDLE_CACHE_FLAG);
        h->align = max_t(size_t, align, L1_CACHE_BYTES);

#ifndef CONFIG_TEGRA_IOVMM
        /* Convert iovmm requests to generic carveout. */
        if (heap_mask & NVMAP_HEAP_IOVMM) {
                heap_mask = (heap_mask & ~NVMAP_HEAP_IOVMM) |
                            NVMAP_HEAP_CARVEOUT_GENERIC;
        }
#endif
#ifdef CONFIG_NVMAP_ALLOW_SYSMEM
        /* Allow single-page allocations in system memory to save
         * carveout space and avoid extra iovm mappings. */
        if (nr_page == 1) {
                if (heap_mask &
                    (NVMAP_HEAP_IOVMM | NVMAP_HEAP_CARVEOUT_GENERIC))
                        heap_mask |= NVMAP_HEAP_SYSMEM;
        }
#endif
#ifndef CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM
        /* This restriction is deprecated as alignments greater than
           PAGE_SIZE are now correctly handled, but it is retained for
           AP20 compatibility. */
        if (h->align > PAGE_SIZE)
                heap_mask &= NVMAP_HEAP_CARVEOUT_MASK;
#endif
        /* Secure allocations can only be served from secure heaps. */
        if (h->secure)
                heap_mask &= NVMAP_SECURE_HEAPS;

        if (!heap_mask) {
                err = -EINVAL;
                goto out;
        }

        alloc_policy = (nr_page == 1) ? heap_policy_small : heap_policy_large;

        while (!h->alloc && *alloc_policy) {
                unsigned int heap_type;

                heap_type = *alloc_policy++;
                heap_type &= heap_mask;

                if (!heap_type)
                        continue;

                heap_mask &= ~heap_type;

                while (heap_type && !h->alloc) {
                        unsigned int heap;

                        /* Iterate possible heaps MSB-to-LSB, since higher-
                         * priority carveouts will have higher usage masks. */
                        heap = 1 << __fls(heap_type);
                        alloc_handle(client, h, heap);
                        heap_type &= ~heap;
                }
        }

out:
        err = (h->alloc) ? 0 : err;
        nvmap_handle_put(h);
        return err;
}

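/*
 * Drop one client reference (dupe) to a handle. When the last dupe goes
 * away, the ref node is removed from the client's rb-tree, any IOVM or
 * carveout commit accounting is unwound, remaining pins are released, and
 * the handle's own refcount is finally dropped.
 */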
void nvmap_free_handle_id(struct nvmap_client *client, unsigned long id)
{
        struct nvmap_handle_ref *ref;
        struct nvmap_handle *h;
        int pins;

        nvmap_ref_lock(client);

        ref = _nvmap_validate_id_locked(client, id);
        if (!ref) {
                nvmap_ref_unlock(client);
                return;
        }

        trace_nvmap_free_handle_id(client, id);
        BUG_ON(!ref->handle);
        h = ref->handle;

        if (atomic_dec_return(&ref->dupes)) {
                nvmap_ref_unlock(client);
                goto out;
        }

        smp_rmb();
        pins = atomic_read(&ref->pin);
        rb_erase(&ref->node, &client->handle_refs);

        if (h->alloc && h->heap_pgalloc && !h->pgalloc.contig)
                atomic_sub(h->size, &client->iovm_commit);

        if (h->alloc && !h->heap_pgalloc) {
                mutex_lock(&h->lock);
                nvmap_carveout_commit_subtract(client,
                        nvmap_heap_to_arg(nvmap_block_to_heap(h->carveout)),
                        h->size);
                mutex_unlock(&h->lock);
        }

        nvmap_ref_unlock(client);

        if (pins)
                nvmap_err(client, "%s freeing pinned handle %p\n",
                          current->group_leader->comm, h);

        while (pins--)
                nvmap_unpin_handles(client, &ref->handle, 1);

        if (h->owner == client)
                h->owner = NULL;

        kfree(ref);

out:
        BUG_ON(!atomic_read(&h->ref));
        nvmap_handle_put(h);
}

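/* Insert a handle reference into the client's rb-tree, keyed by handle
 * pointer. */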
static void add_handle_ref(struct nvmap_client *client,
                           struct nvmap_handle_ref *ref)
{
        struct rb_node **p, *parent = NULL;

        nvmap_ref_lock(client);
        p = &client->handle_refs.rb_node;
        while (*p) {
                struct nvmap_handle_ref *node;
                parent = *p;
                node = rb_entry(parent, struct nvmap_handle_ref, node);
                if (ref->handle > node->handle)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&ref->node, parent, p);
        rb_insert_color(&ref->node, &client->handle_refs);
        nvmap_ref_unlock(client);
}

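/*
 * Create a new, unallocated handle of the requested size for 'client' and
 * return a reference to it. The handle defaults to write-combined and must
 * be allocated later via nvmap_alloc_handle_id().
 */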
struct nvmap_handle_ref *nvmap_create_handle(struct nvmap_client *client,
                                             size_t size)
{
        struct nvmap_handle *h;
        struct nvmap_handle_ref *ref = NULL;

        if (!client)
                return ERR_PTR(-EINVAL);

        if (!size)
                return ERR_PTR(-EINVAL);

        h = kzalloc(sizeof(*h), GFP_KERNEL);
        if (!h)
                return ERR_PTR(-ENOMEM);

        ref = kzalloc(sizeof(*ref), GFP_KERNEL);
        if (!ref) {
                kfree(h);
                return ERR_PTR(-ENOMEM);
        }

        atomic_set(&h->ref, 1);
        atomic_set(&h->pin, 0);
        h->owner = client;
        h->dev = client->dev;
        BUG_ON(!h->owner);
        h->size = h->orig_size = size;
        h->flags = NVMAP_HANDLE_WRITE_COMBINE;
        mutex_init(&h->lock);

        nvmap_handle_add(client->dev, h);

        atomic_set(&ref->dupes, 1);
        ref->handle = h;
        atomic_set(&ref->pin, 0);
        add_handle_ref(client, ref);
        trace_nvmap_create_handle(client, h, size, ref);
        return ref;
}

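/*
 * Take an additional reference to an existing, allocated handle on behalf of
 * 'client'. If the client already references the handle only the dupe count
 * is bumped; otherwise IOVM/carveout commit accounting is charged and a new
 * ref node is added to the client's rb-tree.
 */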
struct nvmap_handle_ref *nvmap_duplicate_handle_id(struct nvmap_client *client,
                                                   unsigned long id)
{
        struct nvmap_handle_ref *ref = NULL;
        struct nvmap_handle *h = NULL;

        BUG_ON(!client || client->dev != nvmap_dev);
        /* On success, the reference count of the handle is incremented,
         * so the success paths do not call nvmap_handle_put. */

        /* Allow the handle to be accessed by other (non-owner) clients only
         * if the owner is "videobuf2-dma-nvmap", a V4L2 capture kernel
         * module. Such a handle can then be accessed by the "user" client
         * for rendering/encoding. */
        if (!strcmp(((struct nvmap_handle *)id)->owner->name,
                                "videobuf2-dma-nvmap"))
                client = ((struct nvmap_handle *)id)->owner;

        h = nvmap_validate_get(client, id);

        if (!h) {
                nvmap_debug(client, "%s duplicate handle failed\n",
                            current->group_leader->comm);
                return ERR_PTR(-EPERM);
        }

        if (!h->alloc) {
                nvmap_err(client, "%s duplicating unallocated handle\n",
                          current->group_leader->comm);
                nvmap_handle_put(h);
                return ERR_PTR(-EINVAL);
        }

        nvmap_ref_lock(client);
        ref = _nvmap_validate_id_locked(client, (unsigned long)h);

        if (ref) {
                /* Handle already duplicated in this client; just increment
                 * the reference count rather than re-duplicating it. */
                atomic_inc(&ref->dupes);
                nvmap_ref_unlock(client);
                return ref;
        }

        nvmap_ref_unlock(client);

        /* Verify that adding this handle to the process' access list
         * won't exceed the IOVM limit. */
        if (h->heap_pgalloc && !h->pgalloc.contig) {
                int oc;
                oc = atomic_add_return(h->size, &client->iovm_commit);
                if (oc > client->iovm_limit && !client->super) {
                        atomic_sub(h->size, &client->iovm_commit);
                        nvmap_handle_put(h);
                        nvmap_err(client, "duplicating %p in %s over-commits"
                                  " IOVMM space\n", (void *)id,
                                  current->group_leader->comm);
                        return ERR_PTR(-ENOMEM);
                }
        }

        ref = kzalloc(sizeof(*ref), GFP_KERNEL);
        if (!ref) {
                nvmap_handle_put(h);
                return ERR_PTR(-ENOMEM);
        }

        if (!h->heap_pgalloc) {
                mutex_lock(&h->lock);
                nvmap_carveout_commit_add(client,
                        nvmap_heap_to_arg(nvmap_block_to_heap(h->carveout)),
                        h->size);
                mutex_unlock(&h->lock);
        }

        atomic_set(&ref->dupes, 1);
        ref->handle = h;
        atomic_set(&ref->pin, 0);
        add_handle_ref(client, ref);
        trace_nvmap_duplicate_handle_id(client, id, ref);
        return ref;
}