/*
 * drivers/video/tegra/nvmap/nvmap_handle.c
 *
 * Handle allocation and freeing routines for nvmap
 *
 * Copyright (c) 2009-2012, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#define pr_fmt(fmt)     "%s: " fmt, __func__

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/shrinker.h>
#include <linux/moduleparam.h>
#include <linux/nvmap.h>

#include <asm/cacheflush.h>
#include <asm/outercache.h>
#include <asm/pgtable.h>

#include <mach/iovmm.h>
#include <trace/events/nvmap.h>

#include "nvmap.h"
#include "nvmap_mru.h"
#include "nvmap_common.h"

#define NVMAP_SECURE_HEAPS      (NVMAP_HEAP_CARVEOUT_IRAM | NVMAP_HEAP_IOVMM | \
                                 NVMAP_HEAP_CARVEOUT_VPR)
#ifdef CONFIG_NVMAP_HIGHMEM_ONLY
#define GFP_NVMAP               (__GFP_HIGHMEM | __GFP_NOWARN)
#else
#define GFP_NVMAP               (GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN)
#endif
/* handles may be arbitrarily large (16+MiB), and any handle allocated from
 * the kernel (i.e., not a carveout handle) includes its array of pages. to
 * preserve kmalloc space, if the array of pages exceeds PAGELIST_VMALLOC_MIN,
 * the array is allocated using vmalloc. */
#define PAGELIST_VMALLOC_MIN    (PAGE_SIZE)
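/*
 * Example (assuming 4 KiB pages and 4-byte pointers, as on 32-bit Tegra):
 * a 16 MiB handle spans 4096 pages, so its struct page pointer array is
 * 16 KiB -- well above PAGELIST_VMALLOC_MIN -- and therefore comes from
 * vmalloc rather than kmalloc.
 */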

#ifdef CONFIG_NVMAP_PAGE_POOLS

#define NVMAP_TEST_PAGE_POOL_SHRINKER 1
static bool enable_pp = true;
static int pool_size[NVMAP_NUM_POOLS];

static char *s_memtype_str[] = {
        "uc",
        "wc",
        "iwb",
        "wb",
};

static inline void nvmap_page_pool_lock(struct nvmap_page_pool *pool)
{
        mutex_lock(&pool->lock);
}

static inline void nvmap_page_pool_unlock(struct nvmap_page_pool *pool)
{
        mutex_unlock(&pool->lock);
}

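/*
 * Each page pool is a simple LIFO stack of pre-allocated pages whose kernel
 * mappings have already been switched to the pool's memory type (uc, wc or
 * iwb), so allocations served from a pool skip the expensive page attribute
 * change.
 */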
static struct page *nvmap_page_pool_alloc_locked(struct nvmap_page_pool *pool)
{
        struct page *page = NULL;

        if (pool->npages > 0)
                page = pool->page_array[--pool->npages];
        return page;
}

static struct page *nvmap_page_pool_alloc(struct nvmap_page_pool *pool)
{
        struct page *page = NULL;

        if (pool) {
                nvmap_page_pool_lock(pool);
                page = nvmap_page_pool_alloc_locked(pool);
                nvmap_page_pool_unlock(pool);
        }
        return page;
}

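/*
 * Hand a page back to the pool.  Returns false when pooling is disabled via
 * the enable_page_pools parameter or the pool is already full; in that case
 * the caller keeps ownership of the page and must free it itself.
 */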
static bool nvmap_page_pool_release_locked(struct nvmap_page_pool *pool,
                                           struct page *page)
{
        bool ret = false;

        if (enable_pp && pool->npages < pool->max_pages) {
                pool->page_array[pool->npages++] = page;
                ret = true;
        }
        return ret;
}

static bool nvmap_page_pool_release(struct nvmap_page_pool *pool,
                                    struct page *page)
{
        bool ret = false;

        if (pool) {
                nvmap_page_pool_lock(pool);
                ret = nvmap_page_pool_release_locked(pool, page);
                nvmap_page_pool_unlock(pool);
        }
        return ret;
}

static int nvmap_page_pool_get_available_count(struct nvmap_page_pool *pool)
{
        return pool->npages;
}

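/*
 * Drain up to nr_free pages from the pool: pop them off, restore their
 * kernel mappings to write-back and return them to the page allocator.
 * The return value is the number of requested pages that could not be
 * reclaimed here (0 means the request was fully satisfied), which lets the
 * shrinker below carry the remainder over to the next pool.
 */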
static int nvmap_page_pool_free(struct nvmap_page_pool *pool, int nr_free)
{
        int i = nr_free;
        int idx = 0;
        struct page *page;

        if (!nr_free)
                return nr_free;
        nvmap_page_pool_lock(pool);
        while (i) {
                page = nvmap_page_pool_alloc_locked(pool);
                if (!page)
                        break;
                pool->shrink_array[idx++] = page;
                i--;
        }

        if (idx)
                set_pages_array_wb(pool->shrink_array, idx);
        while (idx--)
                __free_page(pool->shrink_array[idx]);
        nvmap_page_pool_unlock(pool);
        return i;
}

static int nvmap_page_pool_get_unused_pages(void)
{
        unsigned int i;
        int total = 0;
        struct nvmap_share *share = nvmap_get_share_from_dev(nvmap_dev);

        for (i = 0; i < NVMAP_NUM_POOLS; i++)
                total += nvmap_page_pool_get_available_count(&share->pools[i]);

        return total;
}

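/*
 * Resize a pool to hold at most 'size' pages.  Excess pages are released
 * with the pool unlocked (nvmap_page_pool_free takes the lock itself), so
 * the free/recheck sequence loops until the pool fits the new size; the
 * page_array and shrink_array buffers are then reallocated to match.
 */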
static void nvmap_page_pool_resize(struct nvmap_page_pool *pool, int size)
{
        int available_pages;
        int pages_to_release = 0;
        struct page **page_array = NULL;
        struct page **shrink_array = NULL;

        if (size == pool->max_pages)
                return;
repeat:
        nvmap_page_pool_free(pool, pages_to_release);
        nvmap_page_pool_lock(pool);
        available_pages = nvmap_page_pool_get_available_count(pool);
        if (available_pages > size) {
                nvmap_page_pool_unlock(pool);
                pages_to_release = available_pages - size;
                goto repeat;
        }

        if (size == 0) {
                vfree(pool->page_array);
                vfree(pool->shrink_array);
                pool->page_array = pool->shrink_array = NULL;
                goto out;
        }

        page_array = vmalloc(sizeof(struct page *) * size);
        shrink_array = vmalloc(sizeof(struct page *) * size);
        if (!page_array || !shrink_array)
                goto fail;

        memcpy(page_array, pool->page_array,
                pool->npages * sizeof(struct page *));
        vfree(pool->page_array);
        vfree(pool->shrink_array);
        pool->page_array = page_array;
        pool->shrink_array = shrink_array;
out:
        pr_debug("%s pool resized to %d from %d pages",
                s_memtype_str[pool->flags], size, pool->max_pages);
        pool->max_pages = size;
        goto exit;
fail:
        vfree(page_array);
        vfree(shrink_array);
        pr_err("page pool resize failed");
exit:
        nvmap_page_pool_unlock(pool);
}

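/*
 * Old-style shrinker callback: a zero sc->nr_to_scan only queries how many
 * pages the pools hold, otherwise pages are reclaimed round-robin across
 * the pools, with start_pool remembering where the last scan left off.
 * Either way the return value is the number of pages still pooled.
 */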
static int nvmap_page_pool_shrink(struct shrinker *shrinker,
                                  struct shrink_control *sc)
{
        unsigned int i;
        unsigned int pool_offset;
        struct nvmap_page_pool *pool;
        int shrink_pages = sc->nr_to_scan;
        static atomic_t start_pool = ATOMIC_INIT(-1);
        struct nvmap_share *share = nvmap_get_share_from_dev(nvmap_dev);

        if (!shrink_pages)
                goto out;

        pr_debug("sh_pages=%d", shrink_pages);

        for (i = 0; i < NVMAP_NUM_POOLS && shrink_pages; i++) {
                pool_offset = atomic_add_return(1, &start_pool) %
                                NVMAP_NUM_POOLS;
                pool = &share->pools[pool_offset];
                shrink_pages = nvmap_page_pool_free(pool, shrink_pages);
        }
out:
        return nvmap_page_pool_get_unused_pages();
}

static struct shrinker nvmap_page_pool_shrinker = {
        .shrink = nvmap_page_pool_shrink,
        .seeks = 1,
};

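/*
 * Drain every pool: the first call (nr_to_scan == 0) merely counts the
 * pooled pages, the second asks the shrinker for twice that many, which is
 * enough to empty all pools regardless of how they are balanced.
 */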
static void shrink_page_pools(int *total_pages, int *available_pages)
{
        struct shrink_control sc;

        sc.gfp_mask = GFP_KERNEL;
        sc.nr_to_scan = 0;
        *total_pages = nvmap_page_pool_shrink(NULL, &sc);
        sc.nr_to_scan = *total_pages * 2;
        *available_pages = nvmap_page_pool_shrink(NULL, &sc);
}

#if NVMAP_TEST_PAGE_POOL_SHRINKER
static bool shrink_pp;
static int shrink_set(const char *arg, const struct kernel_param *kp)
{
        int cpu = smp_processor_id();
        unsigned long long t1, t2;
        int total_pages, available_pages;

        param_set_bool(arg, kp);

        if (shrink_pp) {
                t1 = cpu_clock(cpu);
                shrink_page_pools(&total_pages, &available_pages);
                t2 = cpu_clock(cpu);
                pr_info("shrink page pools: time=%lldns, "
                        "total_pages_released=%d, free_pages_available=%d",
                        t2-t1, total_pages, available_pages);
        }
        return 0;
}

static int shrink_get(char *buff, const struct kernel_param *kp)
{
        return param_get_bool(buff, kp);
}

static struct kernel_param_ops shrink_ops = {
        .get = shrink_get,
        .set = shrink_set,
};

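/*
 * Writing a non-zero value to this test parameter drains the pools and logs
 * the time taken, e.g. (the exact sysfs path depends on how nvmap is built
 * into the kernel image):
 *   echo 1 > /sys/module/<module>/parameters/shrink_page_pools
 */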
module_param_cb(shrink_page_pools, &shrink_ops, &shrink_pp, 0644);
#endif

static int enable_pp_set(const char *arg, const struct kernel_param *kp)
{
        int total_pages, available_pages;

        param_set_bool(arg, kp);

        if (!enable_pp) {
                shrink_page_pools(&total_pages, &available_pages);
                pr_info("disabled page pools and released pages, "
                        "total_pages_released=%d, free_pages_available=%d",
                        total_pages, available_pages);
        }
        return 0;
}

static int enable_pp_get(char *buff, const struct kernel_param *kp)
{
        return param_get_bool(buff, kp);
}

static struct kernel_param_ops enable_pp_ops = {
        .get = enable_pp_get,
        .set = enable_pp_set,
};

module_param_cb(enable_page_pools, &enable_pp_ops, &enable_pp, 0644);

#define POOL_SIZE_SET(m, i) \
static int pool_size_##m##_set(const char *arg, const struct kernel_param *kp) \
{ \
        struct nvmap_share *share = nvmap_get_share_from_dev(nvmap_dev); \
        param_set_int(arg, kp); \
        nvmap_page_pool_resize(&share->pools[i], pool_size[i]); \
        return 0; \
}

#define POOL_SIZE_GET(m) \
static int pool_size_##m##_get(char *buff, const struct kernel_param *kp) \
{ \
        return param_get_int(buff, kp); \
}

#define POOL_SIZE_OPS(m) \
static struct kernel_param_ops pool_size_##m##_ops = { \
        .get = pool_size_##m##_get, \
        .set = pool_size_##m##_set, \
};

#define POOL_SIZE_MODULE_PARAM_CB(m, i) \
module_param_cb(m##_pool_size, &pool_size_##m##_ops, &pool_size[i], 0644)

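/*
 * Instantiate a <type>_pool_size module parameter for each pool (uc, wc,
 * iwb, wb); writing a new value resizes the corresponding pool at runtime
 * through nvmap_page_pool_resize().
 */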
POOL_SIZE_SET(uc, NVMAP_HANDLE_UNCACHEABLE);
POOL_SIZE_GET(uc);
POOL_SIZE_OPS(uc);
POOL_SIZE_MODULE_PARAM_CB(uc, NVMAP_HANDLE_UNCACHEABLE);

POOL_SIZE_SET(wc, NVMAP_HANDLE_WRITE_COMBINE);
POOL_SIZE_GET(wc);
POOL_SIZE_OPS(wc);
POOL_SIZE_MODULE_PARAM_CB(wc, NVMAP_HANDLE_WRITE_COMBINE);

POOL_SIZE_SET(iwb, NVMAP_HANDLE_INNER_CACHEABLE);
POOL_SIZE_GET(iwb);
POOL_SIZE_OPS(iwb);
POOL_SIZE_MODULE_PARAM_CB(iwb, NVMAP_HANDLE_INNER_CACHEABLE);

POOL_SIZE_SET(wb, NVMAP_HANDLE_CACHEABLE);
POOL_SIZE_GET(wb);
POOL_SIZE_OPS(wb);
POOL_SIZE_MODULE_PARAM_CB(wb, NVMAP_HANDLE_CACHEABLE);

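/*
 * Set up one pool at boot.  Unless CONFIG_NVMAP_PAGE_POOL_SIZE overrides it,
 * each of the uc/wc/iwb pools is sized to 1/8th of total RAM (3/8th in all);
 * no pool is kept for cacheable memory since write-back pages need no
 * attribute change.  The pool is filled eagerly and the pages' kernel
 * mappings are converted in a single batch at the do_cpa label.
 */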
int nvmap_page_pool_init(struct nvmap_page_pool *pool, int flags)
{
        struct page *page;
        int i;
        static int reg = 1;
        struct sysinfo info;
        int highmem_pages = 0;
        typedef int (*set_pages_array) (struct page **pages, int addrinarray);
        set_pages_array s_cpa[] = {
                set_pages_array_uc,
                set_pages_array_wc,
                set_pages_array_iwb,
                set_pages_array_wb
        };

        BUG_ON(flags >= NVMAP_NUM_POOLS);
        memset(pool, 0x0, sizeof(*pool));
        mutex_init(&pool->lock);
        pool->flags = flags;

        /* No default pool for cached memory. */
        if (flags == NVMAP_HANDLE_CACHEABLE)
                return 0;

        si_meminfo(&info);
        if (!pool_size[flags] && !CONFIG_NVMAP_PAGE_POOL_SIZE)
                /* Use 3/8th of total ram for page pools.
                 * 1/8th for uc, 1/8th for wc and 1/8th for iwb.
                 */
                pool->max_pages = info.totalram >> 3;
        else
                pool->max_pages = CONFIG_NVMAP_PAGE_POOL_SIZE;

        if (pool->max_pages <= 0 || pool->max_pages >= info.totalram)
                goto fail;
        pool_size[flags] = pool->max_pages;
        pr_info("nvmap %s page pool size=%d pages",
                s_memtype_str[flags], pool->max_pages);
        pool->page_array = vmalloc(sizeof(void *) * pool->max_pages);
        pool->shrink_array = vmalloc(sizeof(struct page *) * pool->max_pages);
        if (!pool->page_array || !pool->shrink_array)
                goto fail;

        if (reg) {
                reg = 0;
                register_shrinker(&nvmap_page_pool_shrinker);
        }

        nvmap_page_pool_lock(pool);
        for (i = 0; i < pool->max_pages; i++) {
                page = alloc_page(GFP_NVMAP);
                if (!page)
                        goto do_cpa;
                if (!nvmap_page_pool_release_locked(pool, page)) {
                        __free_page(page);
                        goto do_cpa;
                }
                if (PageHighMem(page))
                        highmem_pages++;
        }
        si_meminfo(&info);
        pr_info("nvmap pool = %s, highmem=%d, pool_size=%d, "
                "totalram=%lu, freeram=%lu, totalhigh=%lu, freehigh=%lu",
                s_memtype_str[flags], highmem_pages, pool->max_pages,
                info.totalram, info.freeram, info.totalhigh, info.freehigh);
do_cpa:
        (*s_cpa[flags])(pool->page_array, pool->npages);
        nvmap_page_pool_unlock(pool);
        return 0;
fail:
        pool->max_pages = 0;
        vfree(pool->shrink_array);
        vfree(pool->page_array);
        return -ENOMEM;
}
#endif

static inline void *altalloc(size_t len)
{
        if (len > PAGELIST_VMALLOC_MIN)
                return vmalloc(len);
        else
                return kmalloc(len, GFP_KERNEL);
}

static inline void altfree(void *ptr, size_t len)
{
        if (!ptr)
                return;

        if (len > PAGELIST_VMALLOC_MIN)
                vfree(ptr);
        else
                kfree(ptr);
}

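/*
 * Final teardown of a handle.  Carveout-backed handles simply return their
 * block to the heap.  For page-backed handles the pages are first offered
 * back to the matching page pool (keeping their attributes); only the pages
 * the pool would not take are restored to write-back and freed to the
 * kernel.
 */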
void _nvmap_handle_free(struct nvmap_handle *h)
{
        struct nvmap_share *share = nvmap_get_share_from_dev(h->dev);
        unsigned int i, nr_page, page_index = 0;
#ifdef CONFIG_NVMAP_PAGE_POOLS
        struct nvmap_page_pool *pool = NULL;
#endif

        if (nvmap_handle_remove(h->dev, h) != 0)
                return;

        if (!h->alloc)
                goto out;

        if (!h->heap_pgalloc) {
                nvmap_usecount_inc(h);
                nvmap_heap_free(h->carveout);
                goto out;
        }

        nr_page = DIV_ROUND_UP(h->size, PAGE_SIZE);

        BUG_ON(h->size & ~PAGE_MASK);
        BUG_ON(!h->pgalloc.pages);

        nvmap_mru_remove(share, h);

#ifdef CONFIG_NVMAP_PAGE_POOLS
        if (h->flags < NVMAP_NUM_POOLS)
                pool = &share->pools[h->flags];

        while (page_index < nr_page) {
                if (!nvmap_page_pool_release(pool,
                    h->pgalloc.pages[page_index]))
                        break;
                page_index++;
        }
#endif

        if (page_index == nr_page)
                goto skip_attr_restore;

        /* Restore page attributes. */
        if (h->flags == NVMAP_HANDLE_WRITE_COMBINE ||
            h->flags == NVMAP_HANDLE_UNCACHEABLE ||
            h->flags == NVMAP_HANDLE_INNER_CACHEABLE)
                set_pages_array_wb(&h->pgalloc.pages[page_index],
                                nr_page - page_index);

skip_attr_restore:
        if (h->pgalloc.area)
                tegra_iovmm_free_vm(h->pgalloc.area);

        for (i = page_index; i < nr_page; i++)
                __free_page(h->pgalloc.pages[i]);

        altfree(h->pgalloc.pages, nr_page * sizeof(struct page *));

out:
        kfree(h);
}

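/*
 * Allocate 'size' bytes worth of order-0 pages: grab one higher-order block,
 * split it into individual pages and immediately free the tail beyond the
 * requested size, so nothing is wasted on rounding up to a power of two.
 */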
static struct page *nvmap_alloc_pages_exact(gfp_t gfp, size_t size)
{
        struct page *page, *p, *e;
        unsigned int order;

        size = PAGE_ALIGN(size);
        order = get_order(size);
        page = alloc_pages(gfp, order);

        if (!page)
                return NULL;

        split_page(page, order);
        e = page + (1 << order);
        for (p = page + (size >> PAGE_SHIFT); p < e; p++)
                __free_page(p);

        return page;
}

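/*
 * Populate a handle with pages.  Contiguous requests come from a single
 * higher-order allocation; discontiguous ones are served from the matching
 * page pool first and then topped up page by page from the system.  Pages
 * that did not come from a pool get their kernel mapping converted to the
 * handle's cache type before the handle is published.
 */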
static int handle_page_alloc(struct nvmap_client *client,
                             struct nvmap_handle *h, bool contiguous)
{
        size_t size = PAGE_ALIGN(h->size);
        unsigned int nr_page = size >> PAGE_SHIFT;
        pgprot_t prot;
        unsigned int i = 0, page_index = 0;
        struct page **pages;
#ifdef CONFIG_NVMAP_PAGE_POOLS
        struct nvmap_page_pool *pool = NULL;
        struct nvmap_share *share = nvmap_get_share_from_dev(h->dev);
#endif

        pages = altalloc(nr_page * sizeof(*pages));
        if (!pages)
                return -ENOMEM;

        prot = nvmap_pgprot(h, pgprot_kernel);

        h->pgalloc.area = NULL;
        if (contiguous) {
                struct page *page;
                page = nvmap_alloc_pages_exact(GFP_NVMAP, size);
                if (!page)
                        goto fail;

                for (i = 0; i < nr_page; i++)
                        pages[i] = nth_page(page, i);

        } else {
#ifdef CONFIG_NVMAP_PAGE_POOLS
                if (h->flags < NVMAP_NUM_POOLS)
                        pool = &share->pools[h->flags];

                for (i = 0; i < nr_page; i++) {
                        /* Get pages from pool, if available. */
                        pages[i] = nvmap_page_pool_alloc(pool);
                        if (!pages[i])
                                break;
                        page_index++;
                }
#endif
                for (; i < nr_page; i++) {
                        pages[i] = nvmap_alloc_pages_exact(GFP_NVMAP,
                                PAGE_SIZE);
                        if (!pages[i])
                                goto fail;
                }

#ifndef CONFIG_NVMAP_RECLAIM_UNPINNED_VM
                h->pgalloc.area = tegra_iovmm_create_vm(client->share->iovmm,
                                        NULL, size, h->align, prot,
                                        h->pgalloc.iovm_addr);
                if (!h->pgalloc.area)
                        goto fail;

                h->pgalloc.dirty = true;
#endif
        }

        if (nr_page == page_index)
                goto skip_attr_change;

        /* Update the pages mapping in kernel page table. */
        if (h->flags == NVMAP_HANDLE_WRITE_COMBINE)
                set_pages_array_wc(&pages[page_index],
                                nr_page - page_index);
        else if (h->flags == NVMAP_HANDLE_UNCACHEABLE)
                set_pages_array_uc(&pages[page_index],
                                nr_page - page_index);
        else if (h->flags == NVMAP_HANDLE_INNER_CACHEABLE)
                set_pages_array_iwb(&pages[page_index],
                                nr_page - page_index);

skip_attr_change:
        h->size = size;
        h->pgalloc.pages = pages;
        h->pgalloc.contig = contiguous;
        INIT_LIST_HEAD(&h->pgalloc.mru_list);
        return 0;

fail:
        while (i--) {
                set_pages_array_wb(&pages[i], 1);
                __free_page(pages[i]);
        }
        altfree(pages, nr_page * sizeof(*pages));
        wmb();
        return -ENOMEM;
}

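/*
 * Try to satisfy an allocation from exactly one heap type ('type' must be a
 * single bit, hence the BUG_ON below).  Carveout heaps hand out a heap
 * block, IOVMM allocations are charged against the client's iovm_commit
 * budget before allocating, and SYSMEM falls back to a contiguous page
 * allocation.
 */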
static void alloc_handle(struct nvmap_client *client,
                         struct nvmap_handle *h, unsigned int type)
{
        unsigned int carveout_mask = NVMAP_HEAP_CARVEOUT_MASK;
        unsigned int iovmm_mask = NVMAP_HEAP_IOVMM;

        BUG_ON(type & (type - 1));

#ifdef CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM
        /* Convert generic carveout requests to iovmm requests. */
        carveout_mask &= ~NVMAP_HEAP_CARVEOUT_GENERIC;
        iovmm_mask |= NVMAP_HEAP_CARVEOUT_GENERIC;
#endif

        if (type & carveout_mask) {
                struct nvmap_heap_block *b;
                /* Protect handle from relocation */
                nvmap_usecount_inc(h);

                b = nvmap_carveout_alloc(client, h, type);
                if (b) {
                        h->heap_pgalloc = false;
                        h->alloc = true;
                        nvmap_carveout_commit_add(client,
                                nvmap_heap_to_arg(nvmap_block_to_heap(b)),
                                h->size);
                }
                nvmap_usecount_dec(h);

        } else if (type & iovmm_mask) {
                size_t reserved = PAGE_ALIGN(h->size);
                int commit = 0;
                int ret;

                /* increment the committed IOVM space prior to allocation
                 * to avoid race conditions with other threads simultaneously
                 * allocating. */
                commit = atomic_add_return(reserved,
                                            &client->iovm_commit);

                if (commit < client->iovm_limit)
                        ret = handle_page_alloc(client, h, false);
                else
                        ret = -ENOMEM;

                if (!ret) {
                        h->heap_pgalloc = true;
                        h->alloc = true;
                } else {
                        atomic_sub(reserved, &client->iovm_commit);
                }

        } else if (type & NVMAP_HEAP_SYSMEM) {
                if (handle_page_alloc(client, h, true) == 0) {
                        BUG_ON(!h->pgalloc.contig);
                        h->heap_pgalloc = true;
                        h->alloc = true;
                }
        }
}

/* small allocations will try to allocate from generic OS memory before
 * any of the limited heaps, to increase the effective memory for graphics
 * allocations, and to reduce fragmentation of the graphics heaps with
 * sub-page splinters */
static const unsigned int heap_policy_small[] = {
        NVMAP_HEAP_CARVEOUT_VPR,
        NVMAP_HEAP_CARVEOUT_IRAM,
#ifdef CONFIG_NVMAP_ALLOW_SYSMEM
        NVMAP_HEAP_SYSMEM,
#endif
        NVMAP_HEAP_CARVEOUT_MASK,
        NVMAP_HEAP_IOVMM,
        0,
};

static const unsigned int heap_policy_large[] = {
        NVMAP_HEAP_CARVEOUT_VPR,
        NVMAP_HEAP_CARVEOUT_IRAM,
        NVMAP_HEAP_IOVMM,
        NVMAP_HEAP_CARVEOUT_MASK,
#ifdef CONFIG_NVMAP_ALLOW_SYSMEM
        NVMAP_HEAP_SYSMEM,
#endif
        0,
};

int nvmap_alloc_handle_id(struct nvmap_client *client,
                          unsigned long id, unsigned int heap_mask,
                          size_t align, unsigned int flags)
{
        struct nvmap_handle *h = NULL;
        const unsigned int *alloc_policy;
        int nr_page;
        int err = -ENOMEM;

        h = nvmap_get_handle_id(client, id);

        if (!h)
                return -EINVAL;

        if (h->alloc)
                goto out;

        trace_nvmap_alloc_handle_id(client, id, heap_mask, align, flags);
        h->userflags = flags;
        nr_page = ((h->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
        h->secure = !!(flags & NVMAP_HANDLE_SECURE);
        h->flags = (flags & NVMAP_HANDLE_CACHE_FLAG);
        h->align = max_t(size_t, align, L1_CACHE_BYTES);

#ifndef CONFIG_TEGRA_IOVMM
        /* convert iovmm requests to generic carveout. */
        if (heap_mask & NVMAP_HEAP_IOVMM) {
                heap_mask = (heap_mask & ~NVMAP_HEAP_IOVMM) |
                            NVMAP_HEAP_CARVEOUT_GENERIC;
        }
#endif
#ifdef CONFIG_NVMAP_ALLOW_SYSMEM
        /* Allow single-page allocations in system memory to save
         * carveout space and avoid extra iovm mappings */
        if (nr_page == 1) {
                if (heap_mask &
                    (NVMAP_HEAP_IOVMM | NVMAP_HEAP_CARVEOUT_GENERIC))
                        heap_mask |= NVMAP_HEAP_SYSMEM;
        }
#endif
#ifndef CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM
        /* This restriction is deprecated as alignments greater than
           PAGE_SIZE are now correctly handled, but it is retained for
           AP20 compatibility. */
        if (h->align > PAGE_SIZE)
                heap_mask &= NVMAP_HEAP_CARVEOUT_MASK;
#endif
        /* secure allocations can only be served from secure heaps */
        if (h->secure)
                heap_mask &= NVMAP_SECURE_HEAPS;

        if (!heap_mask) {
                err = -EINVAL;
                goto out;
        }

        alloc_policy = (nr_page == 1) ? heap_policy_small : heap_policy_large;

        while (!h->alloc && *alloc_policy) {
                unsigned int heap_type;

                heap_type = *alloc_policy++;
                heap_type &= heap_mask;

                if (!heap_type)
                        continue;

                heap_mask &= ~heap_type;

                while (heap_type && !h->alloc) {
                        unsigned int heap;

                        /* iterate possible heaps MSB-to-LSB, since higher-
                         * priority carveouts will have higher usage masks */
                        heap = 1 << __fls(heap_type);
                        alloc_handle(client, h, heap);
                        heap_type &= ~heap;
                }
        }

out:
        err = (h->alloc) ? 0 : err;
        nvmap_handle_put(h);
        return err;
}

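/*
 * Drop one client reference to a handle.  Each client tracks per-handle
 * duplicates in ref->dupes, so only the last free unlinks the ref from the
 * client's tree, returns the carveout/IOVMM accounting and releases any
 * leftover pins; every call drops one reference on the handle itself.
 */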
void nvmap_free_handle_id(struct nvmap_client *client, unsigned long id)
{
        struct nvmap_handle_ref *ref;
        struct nvmap_handle *h;
        int pins;

        nvmap_ref_lock(client);

        ref = _nvmap_validate_id_locked(client, id);
        if (!ref) {
                nvmap_ref_unlock(client);
                return;
        }

        trace_nvmap_free_handle_id(client, id);
        BUG_ON(!ref->handle);
        h = ref->handle;

        if (atomic_dec_return(&ref->dupes)) {
                nvmap_ref_unlock(client);
                goto out;
        }

        smp_rmb();
        pins = atomic_read(&ref->pin);
        rb_erase(&ref->node, &client->handle_refs);

        if (h->alloc && h->heap_pgalloc && !h->pgalloc.contig)
                atomic_sub(h->size, &client->iovm_commit);

        if (h->alloc && !h->heap_pgalloc) {
                mutex_lock(&h->lock);
                nvmap_carveout_commit_subtract(client,
                        nvmap_heap_to_arg(nvmap_block_to_heap(h->carveout)),
                        h->size);
                mutex_unlock(&h->lock);
        }

        nvmap_ref_unlock(client);

        if (pins)
                nvmap_err(client, "%s freeing pinned handle %p\n",
                          current->group_leader->comm, h);

        while (pins--)
                nvmap_unpin_handles(client, &ref->handle, 1);

        if (h->owner == client)
                h->owner = NULL;

        kfree(ref);

out:
        BUG_ON(!atomic_read(&h->ref));
        nvmap_handle_put(h);
}

static void add_handle_ref(struct nvmap_client *client,
                           struct nvmap_handle_ref *ref)
{
        struct rb_node **p, *parent = NULL;

        nvmap_ref_lock(client);
        p = &client->handle_refs.rb_node;
        while (*p) {
                struct nvmap_handle_ref *node;
                parent = *p;
                node = rb_entry(parent, struct nvmap_handle_ref, node);
                if (ref->handle > node->handle)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&ref->node, parent, p);
        rb_insert_color(&ref->node, &client->handle_refs);
        nvmap_ref_unlock(client);
}

struct nvmap_handle_ref *nvmap_create_handle(struct nvmap_client *client,
                                             size_t size)
{
        struct nvmap_handle *h;
        struct nvmap_handle_ref *ref = NULL;

        if (!client)
                return ERR_PTR(-EINVAL);

        if (!size)
                return ERR_PTR(-EINVAL);

        h = kzalloc(sizeof(*h), GFP_KERNEL);
        if (!h)
                return ERR_PTR(-ENOMEM);

        ref = kzalloc(sizeof(*ref), GFP_KERNEL);
        if (!ref) {
                kfree(h);
                return ERR_PTR(-ENOMEM);
        }

        atomic_set(&h->ref, 1);
        atomic_set(&h->pin, 0);
        h->owner = client;
        h->dev = client->dev;
        BUG_ON(!h->owner);
        h->size = h->orig_size = size;
        h->flags = NVMAP_HANDLE_WRITE_COMBINE;
        mutex_init(&h->lock);

        nvmap_handle_add(client->dev, h);

        atomic_set(&ref->dupes, 1);
        ref->handle = h;
        atomic_set(&ref->pin, 0);
        add_handle_ref(client, ref);
        trace_nvmap_create_handle(client, h, size, ref);
        return ref;
}

struct nvmap_handle_ref *nvmap_duplicate_handle_id(struct nvmap_client *client,
                                                   unsigned long id)
{
        struct nvmap_handle_ref *ref = NULL;
        struct nvmap_handle *h = NULL;

        BUG_ON(!client || client->dev != nvmap_dev);
        /* on success, the reference count for the handle should be
         * incremented, so the success paths will not call nvmap_handle_put */
        h = nvmap_validate_get(client, id);

        if (!h) {
                nvmap_debug(client, "%s duplicate handle failed\n",
                            current->group_leader->comm);
                return ERR_PTR(-EPERM);
        }

        if (!h->alloc) {
                nvmap_err(client, "%s duplicating unallocated handle\n",
                          current->group_leader->comm);
                nvmap_handle_put(h);
                return ERR_PTR(-EINVAL);
        }

        nvmap_ref_lock(client);
        ref = _nvmap_validate_id_locked(client, (unsigned long)h);

        if (ref) {
                /* handle already duplicated in client; just increment
                 * the reference count rather than re-duplicating it */
                atomic_inc(&ref->dupes);
                nvmap_ref_unlock(client);
                return ref;
        }

        nvmap_ref_unlock(client);

        /* verify that adding this handle to the process' access list
         * won't exceed the IOVM limit */
        if (h->heap_pgalloc && !h->pgalloc.contig) {
                int oc;
                oc = atomic_add_return(h->size, &client->iovm_commit);
                if (oc > client->iovm_limit && !client->super) {
                        atomic_sub(h->size, &client->iovm_commit);
                        nvmap_handle_put(h);
                        nvmap_err(client, "duplicating %p in %s over-commits"
                                  " IOVMM space\n", (void *)id,
                                  current->group_leader->comm);
                        return ERR_PTR(-ENOMEM);
                }
        }

        ref = kzalloc(sizeof(*ref), GFP_KERNEL);
        if (!ref) {
                nvmap_handle_put(h);
                return ERR_PTR(-ENOMEM);
        }

        if (!h->heap_pgalloc) {
                mutex_lock(&h->lock);
                nvmap_carveout_commit_add(client,
                        nvmap_heap_to_arg(nvmap_block_to_heap(h->carveout)),
                        h->size);
                mutex_unlock(&h->lock);
        }

        atomic_set(&ref->dupes, 1);
        ref->handle = h;
        atomic_set(&ref->pin, 0);
        add_handle_ref(client, ref);
        trace_nvmap_duplicate_handle_id(client, id, ref);
        return ref;
}