/*
 * drivers/video/tegra/nvmap/nvmap_handle.c
 *
 * Handle allocation and freeing routines for nvmap
 *
 * Copyright (c) 2009-2012, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#define pr_fmt(fmt)     "%s: " fmt, __func__

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>

#include <asm/cacheflush.h>
#include <asm/outercache.h>
#include <asm/pgtable.h>

#include <mach/iovmm.h>
#include <mach/nvmap.h>

#include <linux/vmstat.h>
#include <linux/swap.h>
#include <linux/shrinker.h>
#include <linux/moduleparam.h>

#include "nvmap.h"
#include "nvmap_mru.h"
#include "nvmap_common.h"

#define PRINT_CARVEOUT_CONVERSION 0
#if PRINT_CARVEOUT_CONVERSION
#define PR_INFO pr_info
#else
#define PR_INFO(...)
#endif

#define NVMAP_SECURE_HEAPS      (NVMAP_HEAP_CARVEOUT_IRAM | NVMAP_HEAP_IOVMM | \
                                 NVMAP_HEAP_CARVEOUT_VPR)
#ifdef CONFIG_NVMAP_HIGHMEM_ONLY
#define GFP_NVMAP               (__GFP_HIGHMEM | __GFP_NOWARN)
#else
#define GFP_NVMAP               (GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN)
#endif
/* handles may be arbitrarily large (16+MiB), and any handle allocated from
 * the kernel (i.e., not a carveout handle) includes its array of pages. to
 * preserve kmalloc space, if the array of pages exceeds PAGELIST_VMALLOC_MIN,
 * the array is allocated using vmalloc. */
#define PAGELIST_VMALLOC_MIN    (PAGE_SIZE * 2)

#ifdef CONFIG_NVMAP_PAGE_POOLS

#define NVMAP_TEST_PAGE_POOL_SHRINKER 1
static bool enable_pp = 1;
static int pool_size[NVMAP_NUM_POOLS];

static char *s_memtype_str[] = {
        "uc",
        "wc",
        "iwb",
        "wb",
};

static inline void nvmap_page_pool_lock(struct nvmap_page_pool *pool)
{
        mutex_lock(&pool->lock);
}

static inline void nvmap_page_pool_unlock(struct nvmap_page_pool *pool)
{
        mutex_unlock(&pool->lock);
}

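/*
 * Pop one page from the pool's page_array. The caller must hold
 * pool->lock; returns NULL if the pool is empty.
 */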
static struct page *nvmap_page_pool_alloc_locked(struct nvmap_page_pool *pool)
{
        struct page *page = NULL;

        if (pool->npages > 0)
                page = pool->page_array[--pool->npages];
        return page;
}

static struct page *nvmap_page_pool_alloc(struct nvmap_page_pool *pool)
{
        struct page *page = NULL;

        if (pool) {
                nvmap_page_pool_lock(pool);
                page = nvmap_page_pool_alloc_locked(pool);
                nvmap_page_pool_unlock(pool);
        }
        return page;
}

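/*
 * Return a page to the pool. The locked variant expects pool->lock to be
 * held; it refuses the page (returns false) when page pools are disabled
 * via the enable_page_pools parameter or when the pool is already full,
 * in which case the caller remains responsible for freeing the page.
 */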
static bool nvmap_page_pool_release_locked(struct nvmap_page_pool *pool,
                                            struct page *page)
{
        int ret = false;

        if (enable_pp && pool->npages < pool->max_pages) {
                pool->page_array[pool->npages++] = page;
                ret = true;
        }
        return ret;
}

static bool nvmap_page_pool_release(struct nvmap_page_pool *pool,
                                          struct page *page)
{
        int ret = false;

        if (pool) {
                nvmap_page_pool_lock(pool);
                ret = nvmap_page_pool_release_locked(pool, page);
                nvmap_page_pool_unlock(pool);
        }
        return ret;
}

static int nvmap_page_pool_get_available_count(struct nvmap_page_pool *pool)
{
        return pool->npages;
}

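/*
 * Drain up to nr_free pages from the pool: pages are popped into
 * shrink_array, switched back to write-back kernel mappings in one batch,
 * then released to the page allocator. Returns the number of pages that
 * could not be freed (i.e. the remaining shrink budget).
 */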
static int nvmap_page_pool_free(struct nvmap_page_pool *pool, int nr_free)
{
        int i = nr_free;
        int idx = 0;
        struct page *page;

        if (!nr_free)
                return nr_free;
        nvmap_page_pool_lock(pool);
        while (i) {
                page = nvmap_page_pool_alloc_locked(pool);
                if (!page)
                        break;
                pool->shrink_array[idx++] = page;
                i--;
        }

        if (idx)
                set_pages_array_wb(pool->shrink_array, idx);
        while (idx--)
                __free_page(pool->shrink_array[idx]);
        nvmap_page_pool_unlock(pool);
        return i;
}

static int nvmap_page_pool_get_unused_pages(void)
{
        unsigned int i;
        int total = 0;
        struct nvmap_share *share = nvmap_get_share_from_dev(nvmap_dev);

        for (i = 0; i < NVMAP_NUM_POOLS; i++)
                total += nvmap_page_pool_get_available_count(&share->pools[i]);

        return total;
}

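/*
 * Resize a pool to hold at most 'size' pages. Any excess pages are freed
 * first, then page_array/shrink_array are reallocated and the cached
 * entries copied across. A size of zero tears the arrays down entirely.
 */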
static void nvmap_page_pool_resize(struct nvmap_page_pool *pool, int size)
{
        int available_pages;
        int pages_to_release = 0;
        struct page **page_array = NULL;
        struct page **shrink_array = NULL;

        if (size == pool->max_pages)
                return;
repeat:
        nvmap_page_pool_free(pool, pages_to_release);
        nvmap_page_pool_lock(pool);
        available_pages = nvmap_page_pool_get_available_count(pool);
        if (available_pages > size) {
                nvmap_page_pool_unlock(pool);
                pages_to_release = available_pages - size;
                goto repeat;
        }

        if (size == 0) {
                vfree(pool->page_array);
                vfree(pool->shrink_array);
                pool->page_array = pool->shrink_array = NULL;
                goto out;
        }

        page_array = vmalloc(sizeof(struct page *) * size);
        shrink_array = vmalloc(sizeof(struct page *) * size);
        if (!page_array || !shrink_array)
                goto fail;

        memcpy(page_array, pool->page_array,
                pool->npages * sizeof(struct page *));
        vfree(pool->page_array);
        vfree(pool->shrink_array);
        pool->page_array = page_array;
        pool->shrink_array = shrink_array;
out:
        pr_debug("%s pool resized to %d from %d pages",
                s_memtype_str[pool->flags], size, pool->max_pages);
        pool->max_pages = size;
        goto exit;
fail:
        vfree(page_array);
        vfree(shrink_array);
        pr_err("failed");
exit:
        nvmap_page_pool_unlock(pool);
}

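/*
 * Shrinker callback. With sc->nr_to_scan == 0 this only reports how many
 * pooled pages could be reclaimed; otherwise it frees up to nr_to_scan
 * pages, rotating through the pools round-robin (start_pool) so the same
 * pool is not always drained first. Returns the number of pages that
 * remain cached across all pools.
 */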
static int nvmap_page_pool_shrink(struct shrinker *shrinker,
                                  struct shrink_control *sc)
{
        unsigned int i;
        unsigned int pool_offset;
        struct nvmap_page_pool *pool;
        int shrink_pages = sc->nr_to_scan;
        static atomic_t start_pool = ATOMIC_INIT(-1);
        struct nvmap_share *share = nvmap_get_share_from_dev(nvmap_dev);

        if (!shrink_pages)
                goto out;

        pr_debug("sh_pages=%d", shrink_pages);

        for (i = 0; i < NVMAP_NUM_POOLS && shrink_pages; i++) {
                pool_offset = atomic_add_return(1, &start_pool) %
                                NVMAP_NUM_POOLS;
                pool = &share->pools[pool_offset];
                shrink_pages = nvmap_page_pool_free(pool, shrink_pages);
        }
out:
        return nvmap_page_pool_get_unused_pages();
}

static struct shrinker nvmap_page_pool_shrinker = {
        .shrink = nvmap_page_pool_shrink,
        .seeks = 1,
};

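/*
 * Force a full drain of all pools by calling the shrinker directly: the
 * first call (nr_to_scan = 0) samples the current pool population, the
 * second asks for twice that many pages so everything is released. Used
 * by the debug module parameters below.
 */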
static void shrink_page_pools(int *total_pages, int *available_pages)
{
        struct shrink_control sc;

        sc.gfp_mask = GFP_KERNEL;
        sc.nr_to_scan = 0;
        *total_pages = nvmap_page_pool_shrink(NULL, &sc);
        sc.nr_to_scan = *total_pages * 2;
        *available_pages = nvmap_page_pool_shrink(NULL, &sc);
}

#if NVMAP_TEST_PAGE_POOL_SHRINKER
static bool shrink_pp;
static int shrink_set(const char *arg, const struct kernel_param *kp)
{
        int cpu = smp_processor_id();
        unsigned long long t1, t2;
        int total_pages, available_pages;

        param_set_bool(arg, kp);

        if (shrink_pp) {
                t1 = cpu_clock(cpu);
                shrink_page_pools(&total_pages, &available_pages);
                t2 = cpu_clock(cpu);
                pr_info("shrink page pools: time=%lldns, "
                        "total_pages_released=%d, free_pages_available=%d",
                        t2-t1, total_pages, available_pages);
        }
        return 0;
}

static int shrink_get(char *buff, const struct kernel_param *kp)
{
        return param_get_bool(buff, kp);
}

static struct kernel_param_ops shrink_ops = {
        .get = shrink_get,
        .set = shrink_set,
};

module_param_cb(shrink_page_pools, &shrink_ops, &shrink_pp, 0644);
#endif

static int enable_pp_set(const char *arg, const struct kernel_param *kp)
{
        int total_pages, available_pages;

        param_set_bool(arg, kp);

        if (!enable_pp) {
                shrink_page_pools(&total_pages, &available_pages);
                pr_info("disabled page pools and released pages, "
                        "total_pages_released=%d, free_pages_available=%d",
                        total_pages, available_pages);
        }
        return 0;
}

static int enable_pp_get(char *buff, const struct kernel_param *kp)
{
        /* enable_pp is a bool, so read it back with the matching helper. */
        return param_get_bool(buff, kp);
}

static struct kernel_param_ops enable_pp_ops = {
        .get = enable_pp_get,
        .set = enable_pp_set,
};

module_param_cb(enable_page_pools, &enable_pp_ops, &enable_pp, 0644);

#define POOL_SIZE_SET(m, i) \
static int pool_size_##m##_set(const char *arg, const struct kernel_param *kp) \
{ \
        struct nvmap_share *share = nvmap_get_share_from_dev(nvmap_dev); \
        param_set_int(arg, kp); \
        nvmap_page_pool_resize(&share->pools[i], pool_size[i]); \
        return 0; \
}

#define POOL_SIZE_GET(m) \
static int pool_size_##m##_get(char *buff, const struct kernel_param *kp) \
{ \
        return param_get_int(buff, kp); \
}

#define POOL_SIZE_OPS(m) \
static struct kernel_param_ops pool_size_##m##_ops = { \
        .get = pool_size_##m##_get, \
        .set = pool_size_##m##_set, \
};

#define POOL_SIZE_MODULE_PARAM_CB(m, i) \
module_param_cb(m##_pool_size, &pool_size_##m##_ops, &pool_size[i], 0644)

POOL_SIZE_SET(uc, NVMAP_HANDLE_UNCACHEABLE);
POOL_SIZE_GET(uc);
POOL_SIZE_OPS(uc);
POOL_SIZE_MODULE_PARAM_CB(uc, NVMAP_HANDLE_UNCACHEABLE);

POOL_SIZE_SET(wc, NVMAP_HANDLE_WRITE_COMBINE);
POOL_SIZE_GET(wc);
POOL_SIZE_OPS(wc);
POOL_SIZE_MODULE_PARAM_CB(wc, NVMAP_HANDLE_WRITE_COMBINE);

POOL_SIZE_SET(iwb, NVMAP_HANDLE_INNER_CACHEABLE);
POOL_SIZE_GET(iwb);
POOL_SIZE_OPS(iwb);
POOL_SIZE_MODULE_PARAM_CB(iwb, NVMAP_HANDLE_INNER_CACHEABLE);

POOL_SIZE_SET(wb, NVMAP_HANDLE_CACHEABLE);
POOL_SIZE_GET(wb);
POOL_SIZE_OPS(wb);
POOL_SIZE_MODULE_PARAM_CB(wb, NVMAP_HANDLE_CACHEABLE);

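/*
 * Example (illustrative only): the parameters defined above are normally
 * exposed under /sys/module/<module name>/parameters/; the exact directory
 * depends on how this file is built into the kernel. A tuning session
 * might look like:
 *
 *   echo 0    > .../parameters/enable_page_pools   # disable and drain pools
 *   echo 1    > .../parameters/shrink_page_pools   # time a full shrink pass
 *   echo 4096 > .../parameters/wc_pool_size        # resize the WC pool
 */

/*
 * Set up one page pool at probe time. Capacity defaults to 1/8th of total
 * RAM per pool (or CONFIG_NVMAP_PAGE_POOL_SIZE when that is set); the pool
 * is then pre-filled with pages whose kernel mappings are converted to the
 * pool's memory type in one batch (set_pages_array_*), and the shrinker is
 * registered the first time a pool is initialised. No pool is kept for
 * plain cacheable memory.
 */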
int nvmap_page_pool_init(struct nvmap_page_pool *pool, int flags)
{
        struct page *page;
        int i;
        static int reg = 1;
        struct sysinfo info;
        int highmem_pages = 0;
        typedef int (*set_pages_array) (struct page **pages, int addrinarray);
        set_pages_array s_cpa[] = {
                set_pages_array_uc,
                set_pages_array_wc,
                set_pages_array_iwb,
                set_pages_array_wb
        };

        BUG_ON(flags >= NVMAP_NUM_POOLS);
        memset(pool, 0x0, sizeof(*pool));
        mutex_init(&pool->lock);
        pool->flags = flags;

        /* No default pool for cached memory. */
        if (flags == NVMAP_HANDLE_CACHEABLE)
                return 0;

        si_meminfo(&info);
        if (!pool_size[flags] && !CONFIG_NVMAP_PAGE_POOL_SIZE)
                /* Use 3/8th of total ram for page pools.
                 * 1/8th for uc, 1/8th for wc and 1/8th for iwb.
                 */
                pool->max_pages = info.totalram >> 3;
        else
                pool->max_pages = CONFIG_NVMAP_PAGE_POOL_SIZE;

        if (pool->max_pages <= 0 || pool->max_pages >= info.totalram)
                goto fail;
        pool_size[flags] = pool->max_pages;
        pr_info("nvmap %s page pool size=%d pages",
                s_memtype_str[flags], pool->max_pages);
        pool->page_array = vmalloc(sizeof(void *) * pool->max_pages);
        pool->shrink_array = vmalloc(sizeof(struct page *) * pool->max_pages);
        if (!pool->page_array || !pool->shrink_array)
                goto fail;

        if (reg) {
                reg = 0;
                register_shrinker(&nvmap_page_pool_shrinker);
        }

        nvmap_page_pool_lock(pool);
        for (i = 0; i < pool->max_pages; i++) {
                page = alloc_page(GFP_NVMAP);
                if (!page)
                        goto do_cpa;
                if (!nvmap_page_pool_release_locked(pool, page)) {
                        __free_page(page);
                        goto do_cpa;
                }
                if (PageHighMem(page))
                        highmem_pages++;
        }
        si_meminfo(&info);
        pr_info("nvmap pool = %s, highmem=%d, pool_size=%d,"
                "totalram=%lu, freeram=%lu, totalhigh=%lu, freehigh=%lu",
                s_memtype_str[flags], highmem_pages, pool->max_pages,
                info.totalram, info.freeram, info.totalhigh, info.freehigh);
do_cpa:
        (*s_cpa[flags])(pool->page_array, pool->npages);
        nvmap_page_pool_unlock(pool);
        return 0;
fail:
        pool->max_pages = 0;
        vfree(pool->shrink_array);
        vfree(pool->page_array);
        return -ENOMEM;
}
#endif

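/*
 * Helpers for the per-handle page array: small arrays come from kmalloc,
 * anything at or above PAGELIST_VMALLOC_MIN falls back to vmalloc (see the
 * comment above that #define).
 */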
static inline void *altalloc(size_t len)
{
        if (len >= PAGELIST_VMALLOC_MIN)
                return vmalloc(len);
        else
                return kmalloc(len, GFP_KERNEL);
}

static inline void altfree(void *ptr, size_t len)
{
        if (!ptr)
                return;

        if (len >= PAGELIST_VMALLOC_MIN)
                vfree(ptr);
        else
                kfree(ptr);
}

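/*
 * Final teardown of a handle: remove it from the device's handle tree,
 * then either free the carveout block or give the individual pages back.
 * Page-backed handles try to return their pages to the matching page pool
 * first; whatever the pool does not take is switched back to write-back
 * mappings and released to the kernel.
 */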
void _nvmap_handle_free(struct nvmap_handle *h)
{
        struct nvmap_share *share = nvmap_get_share_from_dev(h->dev);
        unsigned int i, nr_page, page_index = 0;
#ifdef CONFIG_NVMAP_PAGE_POOLS
        struct nvmap_page_pool *pool = NULL;
#endif

        if (nvmap_handle_remove(h->dev, h) != 0)
                return;

        if (!h->alloc)
                goto out;

        if (!h->heap_pgalloc) {
                nvmap_usecount_inc(h);
                nvmap_heap_free(h->carveout);
                goto out;
        }

        nr_page = DIV_ROUND_UP(h->size, PAGE_SIZE);

        BUG_ON(h->size & ~PAGE_MASK);
        BUG_ON(!h->pgalloc.pages);

        nvmap_mru_remove(share, h);

#ifdef CONFIG_NVMAP_PAGE_POOLS
        if (h->flags < NVMAP_NUM_POOLS)
                pool = &share->pools[h->flags];

        while (page_index < nr_page) {
                if (!nvmap_page_pool_release(pool,
                    h->pgalloc.pages[page_index]))
                        break;
                page_index++;
        }
#endif

        if (page_index == nr_page)
                goto skip_attr_restore;

        /* Restore page attributes. */
        if (h->flags == NVMAP_HANDLE_WRITE_COMBINE ||
            h->flags == NVMAP_HANDLE_UNCACHEABLE ||
            h->flags == NVMAP_HANDLE_INNER_CACHEABLE)
                set_pages_array_wb(&h->pgalloc.pages[page_index],
                                nr_page - page_index);

skip_attr_restore:
        if (h->pgalloc.area)
                tegra_iovmm_free_vm(h->pgalloc.area);

        for (i = page_index; i < nr_page; i++)
                __free_page(h->pgalloc.pages[i]);

        altfree(h->pgalloc.pages, nr_page * sizeof(struct page *));

out:
        kfree(h);
}

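/*
 * Allocate 'size' bytes of physically contiguous pages: grab a higher-order
 * allocation, split it into order-0 pages and free the tail pages beyond
 * the requested size. Similar in spirit to alloc_pages_exact(), but returns
 * the struct page of the first page.
 */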
static struct page *nvmap_alloc_pages_exact(gfp_t gfp, size_t size)
{
        struct page *page, *p, *e;
        unsigned int order;

        size = PAGE_ALIGN(size);
        order = get_order(size);
        page = alloc_pages(gfp, order);

        if (!page)
                return NULL;

        split_page(page, order);
        e = page + (1 << order);
        for (p = page + (size >> PAGE_SHIFT); p < e; p++)
                __free_page(p);

        return page;
}

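/*
 * Back a handle with system pages. Contiguous requests come from a single
 * exact allocation; discontiguous (IOVMM) requests are satisfied from the
 * matching page pool first and then page-by-page from the buddy allocator.
 * Freshly allocated pages (everything past page_index) have their kernel
 * mappings converted to match the handle's cache flags; pooled pages
 * already carry the right attributes.
 */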
static int handle_page_alloc(struct nvmap_client *client,
                             struct nvmap_handle *h, bool contiguous)
{
        size_t size = PAGE_ALIGN(h->size);
        unsigned int nr_page = size >> PAGE_SHIFT;
        pgprot_t prot;
        unsigned int i = 0, page_index = 0;
        struct page **pages;
#ifdef CONFIG_NVMAP_PAGE_POOLS
        struct nvmap_page_pool *pool = NULL;
        struct nvmap_share *share = nvmap_get_share_from_dev(h->dev);
#endif

        pages = altalloc(nr_page * sizeof(*pages));
        if (!pages)
                return -ENOMEM;

        prot = nvmap_pgprot(h, pgprot_kernel);

        h->pgalloc.area = NULL;
        if (contiguous) {
                struct page *page;
                page = nvmap_alloc_pages_exact(GFP_NVMAP, size);
                if (!page)
                        goto fail;

                for (i = 0; i < nr_page; i++)
                        pages[i] = nth_page(page, i);

        } else {
#ifdef CONFIG_NVMAP_PAGE_POOLS
                if (h->flags < NVMAP_NUM_POOLS)
                        pool = &share->pools[h->flags];

                for (i = 0; i < nr_page; i++) {
                        /* Get pages from pool, if available. */
                        pages[i] = nvmap_page_pool_alloc(pool);
                        if (!pages[i])
                                break;
                        page_index++;
                }
#endif
                for (; i < nr_page; i++) {
                        pages[i] = nvmap_alloc_pages_exact(GFP_NVMAP,
                                PAGE_SIZE);
                        if (!pages[i])
                                goto fail;
                }

#ifndef CONFIG_NVMAP_RECLAIM_UNPINNED_VM
                h->pgalloc.area = tegra_iovmm_create_vm(client->share->iovmm,
                                        NULL, size, h->align, prot,
                                        h->pgalloc.iovm_addr);
                if (!h->pgalloc.area)
                        goto fail;

                h->pgalloc.dirty = true;
#endif
        }

        if (nr_page == page_index)
                goto skip_attr_change;

        /* Update the pages mapping in kernel page table. */
        if (h->flags == NVMAP_HANDLE_WRITE_COMBINE)
                set_pages_array_wc(&pages[page_index],
                                nr_page - page_index);
        else if (h->flags == NVMAP_HANDLE_UNCACHEABLE)
                set_pages_array_uc(&pages[page_index],
                                nr_page - page_index);
        else if (h->flags == NVMAP_HANDLE_INNER_CACHEABLE)
                set_pages_array_iwb(&pages[page_index],
                                nr_page - page_index);

skip_attr_change:
        h->size = size;
        h->pgalloc.pages = pages;
        h->pgalloc.contig = contiguous;
        INIT_LIST_HEAD(&h->pgalloc.mru_list);
        return 0;

fail:
        while (i--) {
                set_pages_array_wb(&pages[i], 1);
                __free_page(pages[i]);
        }
        altfree(pages, nr_page * sizeof(*pages));
        wmb();
        return -ENOMEM;
}

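/*
 * Try to satisfy an allocation from exactly one heap type (the caller
 * iterates over the policy list). Carveout requests go through
 * nvmap_carveout_alloc, IOVMM requests are charged against the client's
 * iovm_commit quota before allocating, and sysmem requests use a single
 * contiguous page allocation. On success h->alloc is set.
 */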
static void alloc_handle(struct nvmap_client *client,
                         struct nvmap_handle *h, unsigned int type)
{
        BUG_ON(type & (type - 1));

#ifdef CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM
#define __NVMAP_HEAP_CARVEOUT   (NVMAP_HEAP_CARVEOUT_IRAM | NVMAP_HEAP_CARVEOUT_VPR)
#define __NVMAP_HEAP_IOVMM      (NVMAP_HEAP_IOVMM | NVMAP_HEAP_CARVEOUT_GENERIC)
        if (type & NVMAP_HEAP_CARVEOUT_GENERIC) {
#ifdef CONFIG_NVMAP_ALLOW_SYSMEM
                if (h->size <= PAGE_SIZE) {
                        PR_INFO("###CARVEOUT CONVERTED TO SYSMEM "
                                "0x%x bytes %s(%d)###\n",
                                h->size, current->comm, current->pid);
                        goto sysheap;
                }
#endif
                PR_INFO("###CARVEOUT CONVERTED TO IOVM "
                        "0x%x bytes %s(%d)###\n",
                        h->size, current->comm, current->pid);
        }
#else
#define __NVMAP_HEAP_CARVEOUT   NVMAP_HEAP_CARVEOUT_MASK
#define __NVMAP_HEAP_IOVMM      NVMAP_HEAP_IOVMM
#endif

        if (type & __NVMAP_HEAP_CARVEOUT) {
                struct nvmap_heap_block *b;
#ifdef CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM
                PR_INFO("###IRAM REQUEST RETAINED "
                        "0x%x bytes %s(%d)###\n",
                        h->size, current->comm, current->pid);
#endif
                /* Protect handle from relocation */
                nvmap_usecount_inc(h);

                b = nvmap_carveout_alloc(client, h, type);
                if (b) {
                        h->heap_pgalloc = false;
                        h->alloc = true;
                        nvmap_carveout_commit_add(client,
                                nvmap_heap_to_arg(nvmap_block_to_heap(b)),
                                h->size);
                }
                nvmap_usecount_dec(h);

        } else if (type & __NVMAP_HEAP_IOVMM) {
                size_t reserved = PAGE_ALIGN(h->size);
                int commit = 0;
                int ret;

                /* increment the committed IOVM space prior to allocation
                 * to avoid race conditions with other threads simultaneously
                 * allocating. */
                commit = atomic_add_return(reserved,
                                            &client->iovm_commit);

                if (commit < client->iovm_limit)
                        ret = handle_page_alloc(client, h, false);
                else
                        ret = -ENOMEM;

                if (!ret) {
                        h->heap_pgalloc = true;
                        h->alloc = true;
                } else {
                        atomic_sub(reserved, &client->iovm_commit);
                }

        } else if (type & NVMAP_HEAP_SYSMEM) {
#if defined(CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM) && \
        defined(CONFIG_NVMAP_ALLOW_SYSMEM)
sysheap:
#endif
                if (handle_page_alloc(client, h, true) == 0) {
                        BUG_ON(!h->pgalloc.contig);
                        h->heap_pgalloc = true;
                        h->alloc = true;
                }
        }
}

/* small allocations will try to allocate from generic OS memory before
 * any of the limited heaps, to increase the effective memory for graphics
 * allocations, and to reduce fragmentation of the graphics heaps with
 * sub-page splinters */
static const unsigned int heap_policy_small[] = {
        NVMAP_HEAP_CARVEOUT_VPR,
        NVMAP_HEAP_CARVEOUT_IRAM,
#ifdef CONFIG_NVMAP_ALLOW_SYSMEM
        NVMAP_HEAP_SYSMEM,
#endif
        NVMAP_HEAP_CARVEOUT_MASK,
        NVMAP_HEAP_IOVMM,
        0,
};

static const unsigned int heap_policy_large[] = {
        NVMAP_HEAP_CARVEOUT_VPR,
        NVMAP_HEAP_CARVEOUT_IRAM,
        NVMAP_HEAP_IOVMM,
        NVMAP_HEAP_CARVEOUT_MASK,
#ifdef CONFIG_NVMAP_ALLOW_SYSMEM
        NVMAP_HEAP_SYSMEM,
#endif
        0,
};

/* Do not override the single-page policy if there is not much free space,
 * to avoid invoking the system OOM killer. */
#define NVMAP_SMALL_POLICY_SYSMEM_THRESHOLD 50000000

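/*
 * Allocate backing storage for an already-created handle. The requested
 * heap mask is first narrowed (secure handles only from secure heaps,
 * optional IOVMM-to-carveout and single-page-to-sysmem conversions), then
 * the heaps are tried in policy order until one succeeds. Returns 0 once
 * h->alloc is set, -EINVAL for a bad id or empty mask, -ENOMEM otherwise.
 */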
int nvmap_alloc_handle_id(struct nvmap_client *client,
                          unsigned long id, unsigned int heap_mask,
                          size_t align, unsigned int flags)
{
        struct nvmap_handle *h = NULL;
        const unsigned int *alloc_policy;
        int nr_page;
        int err = -ENOMEM;

        h = nvmap_get_handle_id(client, id);

        if (!h)
                return -EINVAL;

        if (h->alloc)
                goto out;

        h->userflags = flags;
        nr_page = ((h->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
        h->secure = !!(flags & NVMAP_HANDLE_SECURE);
        h->flags = (flags & NVMAP_HANDLE_CACHE_FLAG);
        h->align = max_t(size_t, align, L1_CACHE_BYTES);

#ifndef CONFIG_TEGRA_IOVMM
        if (heap_mask & NVMAP_HEAP_IOVMM) {
                /* IOVMM is unavailable; fall back to the generic carveout. */
                heap_mask &= ~NVMAP_HEAP_IOVMM;
                heap_mask |= NVMAP_HEAP_CARVEOUT_GENERIC;
        }
#endif
#ifndef CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM
#ifdef CONFIG_NVMAP_ALLOW_SYSMEM
        /* Allow single pages allocations in system memory to save
         * carveout space and avoid extra iovm mappings */
        if (nr_page == 1) {
                if (heap_mask & NVMAP_HEAP_IOVMM)
                        heap_mask |= NVMAP_HEAP_SYSMEM;
                else if (heap_mask & NVMAP_HEAP_CARVEOUT_GENERIC) {
                        /* Calculate size of free physical pages
                         * managed by kernel */
                        unsigned long freeMem =
                                (global_page_state(NR_FREE_PAGES) +
                                global_page_state(NR_FILE_PAGES) -
                                total_swapcache_pages) << PAGE_SHIFT;

                        if (freeMem > NVMAP_SMALL_POLICY_SYSMEM_THRESHOLD)
                                heap_mask |= NVMAP_HEAP_SYSMEM;
                }
        }
#endif

        /* This restriction is deprecated as alignments greater than
           PAGE_SIZE are now correctly handled, but it is retained for
           AP20 compatibility. */
        if (h->align > PAGE_SIZE)
                heap_mask &= NVMAP_HEAP_CARVEOUT_MASK;
#endif
        /* secure allocations can only be served from secure heaps */
        if (h->secure)
                heap_mask &= NVMAP_SECURE_HEAPS;

        if (!heap_mask) {
                err = -EINVAL;
                goto out;
        }

        alloc_policy = (nr_page == 1) ? heap_policy_small : heap_policy_large;

        while (!h->alloc && *alloc_policy) {
                unsigned int heap_type;

                heap_type = *alloc_policy++;
                heap_type &= heap_mask;

                if (!heap_type)
                        continue;

                heap_mask &= ~heap_type;

                while (heap_type && !h->alloc) {
                        unsigned int heap;

                        /* iterate possible heaps MSB-to-LSB, since higher-
                         * priority carveouts will have higher usage masks */
                        heap = 1 << __fls(heap_type);
                        alloc_handle(client, h, heap);
                        heap_type &= ~heap;
                }
        }

out:
        err = (h->alloc) ? 0 : err;
        nvmap_handle_put(h);
        return err;
}

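/*
 * Drop one client reference to a handle. When the last duplicate in this
 * client goes away, the ref node is removed from the client's rbtree, any
 * pins taken through this ref are released, and carveout/IOVMM commit
 * accounting is unwound before the global handle refcount is dropped.
 */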
void nvmap_free_handle_id(struct nvmap_client *client, unsigned long id)
{
        struct nvmap_handle_ref *ref;
        struct nvmap_handle *h;
        int pins;

        nvmap_ref_lock(client);

        ref = _nvmap_validate_id_locked(client, id);
        if (!ref) {
                nvmap_ref_unlock(client);
                return;
        }

        BUG_ON(!ref->handle);
        h = ref->handle;

        if (atomic_dec_return(&ref->dupes)) {
                nvmap_ref_unlock(client);
                goto out;
        }

        smp_rmb();
        pins = atomic_read(&ref->pin);
        rb_erase(&ref->node, &client->handle_refs);

        if (h->alloc && h->heap_pgalloc && !h->pgalloc.contig)
                atomic_sub(h->size, &client->iovm_commit);

        if (h->alloc && !h->heap_pgalloc) {
                mutex_lock(&h->lock);
                nvmap_carveout_commit_subtract(client,
                        nvmap_heap_to_arg(nvmap_block_to_heap(h->carveout)),
                        h->size);
                mutex_unlock(&h->lock);
        }

        nvmap_ref_unlock(client);

        if (pins)
                nvmap_err(client, "%s freeing pinned handle %p\n",
                          current->group_leader->comm, h);

        while (pins--)
                nvmap_unpin_handles(client, &ref->handle, 1);

        if (h->owner == client)
                h->owner = NULL;

        kfree(ref);

out:
        BUG_ON(!atomic_read(&h->ref));
        nvmap_handle_put(h);
}

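/*
 * Insert a new ref into the client's handle_refs rbtree, keyed by the
 * handle pointer value.
 */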
static void add_handle_ref(struct nvmap_client *client,
                           struct nvmap_handle_ref *ref)
{
        struct rb_node **p, *parent = NULL;

        nvmap_ref_lock(client);
        p = &client->handle_refs.rb_node;
        while (*p) {
                struct nvmap_handle_ref *node;
                parent = *p;
                node = rb_entry(parent, struct nvmap_handle_ref, node);
                if (ref->handle > node->handle)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&ref->node, parent, p);
        rb_insert_color(&ref->node, &client->handle_refs);
        nvmap_ref_unlock(client);
}

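/*
 * Create a new, unallocated handle of the requested size and register a
 * reference to it in the calling client. The handle defaults to
 * write-combined; backing memory is attached later by
 * nvmap_alloc_handle_id().
 */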
struct nvmap_handle_ref *nvmap_create_handle(struct nvmap_client *client,
                                             size_t size)
{
        struct nvmap_handle *h;
        struct nvmap_handle_ref *ref = NULL;

        if (!client)
                return ERR_PTR(-EINVAL);

        if (!size)
                return ERR_PTR(-EINVAL);

        h = kzalloc(sizeof(*h), GFP_KERNEL);
        if (!h)
                return ERR_PTR(-ENOMEM);

        ref = kzalloc(sizeof(*ref), GFP_KERNEL);
        if (!ref) {
                kfree(h);
                return ERR_PTR(-ENOMEM);
        }

        atomic_set(&h->ref, 1);
        atomic_set(&h->pin, 0);
        h->owner = client;
        h->dev = client->dev;
        BUG_ON(!h->owner);
        h->size = h->orig_size = size;
        h->flags = NVMAP_HANDLE_WRITE_COMBINE;
        mutex_init(&h->lock);

        nvmap_handle_add(client->dev, h);

        atomic_set(&ref->dupes, 1);
        ref->handle = h;
        atomic_set(&ref->pin, 0);
        add_handle_ref(client, ref);
        return ref;
}

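/*
 * Take an additional reference to an existing (and already allocated)
 * handle on behalf of 'client'. If the client already holds a ref, only
 * its dupe count is bumped; otherwise a new ref is created and IOVMM or
 * carveout commit accounting is charged to this client as well.
 */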
struct nvmap_handle_ref *nvmap_duplicate_handle_id(struct nvmap_client *client,
                                                   unsigned long id)
{
        struct nvmap_handle_ref *ref = NULL;
        struct nvmap_handle *h = NULL;

        BUG_ON(!client || client->dev != nvmap_dev);
        /* on success, the reference count for the handle should be
         * incremented, so the success paths will not call nvmap_handle_put */
        h = nvmap_validate_get(client, id);

        if (!h) {
                nvmap_debug(client, "%s duplicate handle failed\n",
                            current->group_leader->comm);
                return ERR_PTR(-EPERM);
        }

        if (!h->alloc) {
                nvmap_err(client, "%s duplicating unallocated handle\n",
                          current->group_leader->comm);
                nvmap_handle_put(h);
                return ERR_PTR(-EINVAL);
        }

        nvmap_ref_lock(client);
        ref = _nvmap_validate_id_locked(client, (unsigned long)h);

        if (ref) {
                /* handle already duplicated in client; just increment
                 * the reference count rather than re-duplicating it */
                atomic_inc(&ref->dupes);
                nvmap_ref_unlock(client);
                return ref;
        }

        nvmap_ref_unlock(client);

        /* verify that adding this handle to the process' access list
         * won't exceed the IOVM limit */
        if (h->heap_pgalloc && !h->pgalloc.contig) {
                int oc;
                oc = atomic_add_return(h->size, &client->iovm_commit);
                if (oc > client->iovm_limit && !client->super) {
                        atomic_sub(h->size, &client->iovm_commit);
                        nvmap_handle_put(h);
                        nvmap_err(client, "duplicating %p in %s over-commits"
                                  " IOVMM space\n", (void *)id,
                                  current->group_leader->comm);
                        return ERR_PTR(-ENOMEM);
                }
        }

        ref = kzalloc(sizeof(*ref), GFP_KERNEL);
        if (!ref) {
                nvmap_handle_put(h);
                return ERR_PTR(-ENOMEM);
        }

        if (!h->heap_pgalloc) {
                mutex_lock(&h->lock);
                nvmap_carveout_commit_add(client,
                        nvmap_heap_to_arg(nvmap_block_to_heap(h->carveout)),
                        h->size);
                mutex_unlock(&h->lock);
        }

        atomic_set(&ref->dupes, 1);
        ref->handle = h;
        atomic_set(&ref->pin, 0);
        add_handle_ref(client, ref);
        return ref;
}