/*
 * drivers/video/tegra/nvmap/nvmap_handle.c
 *
 * Handle allocation and freeing routines for nvmap
 *
 * Copyright (c) 2009-2011, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>

#include <asm/cacheflush.h>
#include <asm/outercache.h>
#include <asm/pgtable.h>

#include <mach/iovmm.h>
#include <mach/nvmap.h>

#include <linux/vmstat.h>
#include <linux/swap.h>

#include "nvmap.h"
#include "nvmap_mru.h"
#include "nvmap_common.h"

#define PRINT_CARVEOUT_CONVERSION 0
#if PRINT_CARVEOUT_CONVERSION
#define PR_INFO pr_info
#else
#define PR_INFO(...)
#endif

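/* NVMAP_SECURE_HEAPS lists the heaps that may serve handles marked
 * NVMAP_HANDLE_SECURE; GFP_NVMAP is the gfp mask used for page
 * allocations (highmem allowed, allocation-failure warnings suppressed
 * so the heap policy below can fall back to another heap). */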
#define NVMAP_SECURE_HEAPS	(NVMAP_HEAP_CARVEOUT_IRAM | NVMAP_HEAP_IOVMM | \
				 NVMAP_HEAP_CARVEOUT_VPR)
#ifdef CONFIG_NVMAP_HIGHMEM_ONLY
#define GFP_NVMAP		(__GFP_HIGHMEM | __GFP_NOWARN)
#else
#define GFP_NVMAP		(GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN)
#endif
/* handles may be arbitrarily large (16+MiB), and any handle allocated from
 * the kernel (i.e., not a carveout handle) includes its array of pages. to
 * preserve kmalloc space, if the array of pages exceeds PAGELIST_VMALLOC_MIN,
 * the array is allocated using vmalloc. */
#define PAGELIST_VMALLOC_MIN	(PAGE_SIZE * 2)

static inline void *altalloc(size_t len)
{
	if (len >= PAGELIST_VMALLOC_MIN)
		return vmalloc(len);
	else
		return kmalloc(len, GFP_KERNEL);
}

static inline void altfree(void *ptr, size_t len)
{
	if (!ptr)
		return;

	if (len >= PAGELIST_VMALLOC_MIN)
		vfree(ptr);
	else
		kfree(ptr);
}

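/* Tear down a handle: detach it from the device, release its backing
 * storage (carveout block, or per-page allocation with kernel page
 * attributes restored and any IOVMM area freed), then free the handle
 * structure itself. */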
void _nvmap_handle_free(struct nvmap_handle *h)
{
	struct nvmap_device *dev = h->dev;
	unsigned int i, nr_page;

	if (nvmap_handle_remove(dev, h) != 0)
		return;

	if (!h->alloc)
		goto out;

	if (!h->heap_pgalloc) {
		nvmap_usecount_inc(h);
		nvmap_heap_free(h->carveout);
		goto out;
	}

	nr_page = DIV_ROUND_UP(h->size, PAGE_SIZE);

	BUG_ON(h->size & ~PAGE_MASK);
	BUG_ON(!h->pgalloc.pages);

	nvmap_mru_remove(nvmap_get_share_from_dev(dev), h);

	/* Restore page attributes. */
	if (h->flags == NVMAP_HANDLE_WRITE_COMBINE ||
	    h->flags == NVMAP_HANDLE_UNCACHEABLE ||
	    h->flags == NVMAP_HANDLE_INNER_CACHEABLE)
		set_pages_array_wb(h->pgalloc.pages, nr_page);

	if (h->pgalloc.area)
		tegra_iovmm_free_vm(h->pgalloc.area);

	for (i = 0; i < nr_page; i++)
		__free_page(h->pgalloc.pages[i]);

	altfree(h->pgalloc.pages, nr_page * sizeof(struct page *));

out:
	kfree(h);
}

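/* Allocate exactly size bytes of page-backed memory: allocate the
 * covering power-of-two order, split it into individual pages, and
 * return the tail pages beyond the requested size to the allocator. */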
static struct page *nvmap_alloc_pages_exact(gfp_t gfp, size_t size)
{
	struct page *page, *p, *e;
	unsigned int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);
	page = alloc_pages(gfp, order);

	if (!page)
		return NULL;

	split_page(page, order);
	e = page + (1 << order);
	for (p = page + (size >> PAGE_SHIFT); p < e; p++)
		__free_page(p);

	return page;
}

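/* Back a handle with system pages: either one physically contiguous
 * run or nr_page discrete pages (with an IOVMM area reserved up front
 * when CONFIG_NVMAP_RECLAIM_UNPINNED_VM is not set), then adjust the
 * kernel mapping attributes for uncached, write-combined, or
 * inner-cacheable handles and flush any highmem pages from the cache. */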
static int handle_page_alloc(struct nvmap_client *client,
			     struct nvmap_handle *h, bool contiguous)
{
	size_t size = PAGE_ALIGN(h->size);
	unsigned int nr_page = size >> PAGE_SHIFT;
	pgprot_t prot;
	unsigned int i = 0;
	struct page **pages;
	unsigned long base;

	pages = altalloc(nr_page * sizeof(*pages));
	if (!pages)
		return -ENOMEM;

	prot = nvmap_pgprot(h, pgprot_kernel);

#ifdef CONFIG_NVMAP_ALLOW_SYSMEM
	if (nr_page == 1)
		contiguous = true;
#endif

	h->pgalloc.area = NULL;
	if (contiguous) {
		struct page *page;
		page = nvmap_alloc_pages_exact(GFP_NVMAP, size);
		if (!page)
			goto fail;

		for (i = 0; i < nr_page; i++)
			pages[i] = nth_page(page, i);

	} else {
		for (i = 0; i < nr_page; i++) {
			pages[i] = nvmap_alloc_pages_exact(GFP_NVMAP,
				PAGE_SIZE);
			if (!pages[i])
				goto fail;
		}

#ifndef CONFIG_NVMAP_RECLAIM_UNPINNED_VM
		h->pgalloc.area = tegra_iovmm_create_vm(client->share->iovmm,
					NULL, size, h->align, prot,
					h->pgalloc.iovm_addr);
		if (!h->pgalloc.area)
			goto fail;

		h->pgalloc.dirty = true;
#endif
	}

	/* Update the pages mapping in kernel page table. */
	if (h->flags == NVMAP_HANDLE_WRITE_COMBINE)
		set_pages_array_wc(pages, nr_page);
	else if (h->flags == NVMAP_HANDLE_UNCACHEABLE)
		set_pages_array_uc(pages, nr_page);
	else if (h->flags == NVMAP_HANDLE_INNER_CACHEABLE)
		set_pages_array_iwb(pages, nr_page);
	else
		goto skip_cache_flush;

	/* Flush the cache for allocated high mem pages only */
	for (i = 0; i < nr_page; i++) {
		if (PageHighMem(pages[i])) {
			__flush_dcache_page(page_mapping(pages[i]), pages[i]);
			base = page_to_phys(pages[i]);
			outer_flush_range(base, base + PAGE_SIZE);
		}
	}

skip_cache_flush:
	h->size = size;
	h->pgalloc.pages = pages;
	h->pgalloc.contig = contiguous;
	INIT_LIST_HEAD(&h->pgalloc.mru_list);
	return 0;

fail:
	while (i--)
		__free_page(pages[i]);
	altfree(pages, nr_page * sizeof(*pages));
	wmb();
	return -ENOMEM;
}

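/* Attempt to satisfy an allocation from a single heap type (exactly one
 * bit set in type, enforced by the BUG_ON below): carveout heaps go
 * through the carveout allocator, IOVMM allocations are charged against
 * the client's iovm_commit limit, and system-memory allocations use a
 * contiguous page run. */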
static void alloc_handle(struct nvmap_client *client,
			 struct nvmap_handle *h, unsigned int type)
{
	BUG_ON(type & (type - 1));

#ifdef CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM
#define __NVMAP_HEAP_CARVEOUT	(NVMAP_HEAP_CARVEOUT_IRAM | NVMAP_HEAP_CARVEOUT_VPR)
#define __NVMAP_HEAP_IOVMM	(NVMAP_HEAP_IOVMM | NVMAP_HEAP_CARVEOUT_GENERIC)
	if (type & NVMAP_HEAP_CARVEOUT_GENERIC) {
#ifdef CONFIG_NVMAP_ALLOW_SYSMEM
		if (h->size <= PAGE_SIZE) {
			PR_INFO("###CARVEOUT CONVERTED TO SYSMEM "
				"0x%x bytes %s(%d)###\n",
				h->size, current->comm, current->pid);
			goto sysheap;
		}
#endif
		PR_INFO("###CARVEOUT CONVERTED TO IOVM "
			"0x%x bytes %s(%d)###\n",
			h->size, current->comm, current->pid);
	}
#else
#define __NVMAP_HEAP_CARVEOUT	NVMAP_HEAP_CARVEOUT_MASK
#define __NVMAP_HEAP_IOVMM	NVMAP_HEAP_IOVMM
#endif

	if (type & __NVMAP_HEAP_CARVEOUT) {
		struct nvmap_heap_block *b;
#ifdef CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM
		PR_INFO("###IRAM REQUEST RETAINED "
			"0x%x bytes %s(%d)###\n",
			h->size, current->comm, current->pid);
#endif
		/* Protect handle from relocation */
		nvmap_usecount_inc(h);

		b = nvmap_carveout_alloc(client, h, type);
		if (b) {
			h->heap_pgalloc = false;
			h->alloc = true;
			nvmap_carveout_commit_add(client,
				nvmap_heap_to_arg(nvmap_block_to_heap(b)),
				h->size);
		}
		nvmap_usecount_dec(h);

	} else if (type & __NVMAP_HEAP_IOVMM) {
		size_t reserved = PAGE_ALIGN(h->size);
		int commit = 0;
		int ret;

		/* increment the committed IOVM space prior to allocation
		 * to avoid race conditions with other threads simultaneously
		 * allocating. */
		commit = atomic_add_return(reserved,
					    &client->iovm_commit);

		if (commit < client->iovm_limit)
			ret = handle_page_alloc(client, h, false);
		else
			ret = -ENOMEM;

		if (!ret) {
			h->heap_pgalloc = true;
			h->alloc = true;
		} else {
			atomic_sub(reserved, &client->iovm_commit);
		}

	} else if (type & NVMAP_HEAP_SYSMEM) {
#if defined(CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM) && \
	defined(CONFIG_NVMAP_ALLOW_SYSMEM)
sysheap:
#endif
		if (handle_page_alloc(client, h, true) == 0) {
			BUG_ON(!h->pgalloc.contig);
			h->heap_pgalloc = true;
			h->alloc = true;
		}
	}
}

/* small allocations will try to allocate from generic OS memory before
 * any of the limited heaps, to increase the effective memory for graphics
 * allocations, and to reduce fragmentation of the graphics heaps with
 * sub-page splinters */
static const unsigned int heap_policy_small[] = {
	NVMAP_HEAP_CARVEOUT_VPR,
	NVMAP_HEAP_CARVEOUT_IRAM,
#ifdef CONFIG_NVMAP_ALLOW_SYSMEM
	NVMAP_HEAP_SYSMEM,
#endif
	NVMAP_HEAP_CARVEOUT_MASK,
	NVMAP_HEAP_IOVMM,
	0,
};

static const unsigned int heap_policy_large[] = {
	NVMAP_HEAP_CARVEOUT_VPR,
	NVMAP_HEAP_CARVEOUT_IRAM,
	NVMAP_HEAP_IOVMM,
	NVMAP_HEAP_CARVEOUT_MASK,
#ifdef CONFIG_NVMAP_ALLOW_SYSMEM
	NVMAP_HEAP_SYSMEM,
#endif
	0,
};

/* Do not override the single-page policy with sysmem when little free
 * memory remains, to avoid invoking the system OOM killer. */
#define NVMAP_SMALL_POLICY_SYSMEM_THRESHOLD 50000000

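/* Allocate backing storage for a previously created handle: validate
 * the requested heap mask (secure handles are limited to
 * NVMAP_SECURE_HEAPS), pick the small- or large-allocation policy, and
 * walk the policy list, trying each permitted heap MSB-to-LSB until one
 * succeeds. */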
int nvmap_alloc_handle_id(struct nvmap_client *client,
			  unsigned long id, unsigned int heap_mask,
			  size_t align, unsigned int flags)
{
	struct nvmap_handle *h = NULL;
	const unsigned int *alloc_policy;
	int nr_page;
	int err = -ENOMEM;

	h = nvmap_get_handle_id(client, id);

	if (!h)
		return -EINVAL;

	if (h->alloc)
		goto out;

	h->userflags = flags;
	nr_page = ((h->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
	h->secure = !!(flags & NVMAP_HANDLE_SECURE);
	h->flags = (flags & NVMAP_HANDLE_CACHE_FLAG);
	h->align = max_t(size_t, align, L1_CACHE_BYTES);

#ifndef CONFIG_TEGRA_IOVMM
	/* Without IOVMM support, redirect IOVMM requests to the generic
	 * carveout. */
	if (heap_mask & NVMAP_HEAP_IOVMM) {
		heap_mask &= ~NVMAP_HEAP_IOVMM;
		heap_mask |= NVMAP_HEAP_CARVEOUT_GENERIC;
	}
#endif
#ifndef CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM
#ifdef CONFIG_NVMAP_ALLOW_SYSMEM
	/* Allow single-page allocations in system memory to save
	 * carveout space and avoid extra iovm mappings */
	if (nr_page == 1) {
		if (heap_mask & NVMAP_HEAP_IOVMM)
			heap_mask |= NVMAP_HEAP_SYSMEM;
		else if (heap_mask & NVMAP_HEAP_CARVEOUT_GENERIC) {
			/* Calculate size of free physical pages
			 * managed by kernel */
			unsigned long freeMem =
				(global_page_state(NR_FREE_PAGES) +
				global_page_state(NR_FILE_PAGES) -
				total_swapcache_pages) << PAGE_SHIFT;

			if (freeMem > NVMAP_SMALL_POLICY_SYSMEM_THRESHOLD)
				heap_mask |= NVMAP_HEAP_SYSMEM;
		}
	}
#endif

	/* This restriction is deprecated as alignments greater than
	   PAGE_SIZE are now correctly handled, but it is retained for
	   AP20 compatibility. */
	if (h->align > PAGE_SIZE)
		heap_mask &= NVMAP_HEAP_CARVEOUT_MASK;
#endif
	/* secure allocations can only be served from secure heaps */
	if (h->secure)
		heap_mask &= NVMAP_SECURE_HEAPS;

	if (!heap_mask) {
		err = -EINVAL;
		goto out;
	}

	alloc_policy = (nr_page == 1) ? heap_policy_small : heap_policy_large;

	while (!h->alloc && *alloc_policy) {
		unsigned int heap_type;

		heap_type = *alloc_policy++;
		heap_type &= heap_mask;

		if (!heap_type)
			continue;

		heap_mask &= ~heap_type;

		while (heap_type && !h->alloc) {
			unsigned int heap;

			/* iterate possible heaps MSB-to-LSB, since higher-
			 * priority carveouts will have higher usage masks */
			heap = 1 << __fls(heap_type);
			alloc_handle(client, h, heap);
			heap_type &= ~heap;
		}
	}

out:
	err = (h->alloc) ? 0 : err;
	nvmap_handle_put(h);
	return err;
}

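/* Drop one client reference to a handle: when the last duplicate is
 * released, remove the ref from the client's rbtree, return its
 * carveout or IOVM commit accounting, unpin any pins still held through
 * this ref, and drop the client's reference to the handle. */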
void nvmap_free_handle_id(struct nvmap_client *client, unsigned long id)
{
	struct nvmap_handle_ref *ref;
	struct nvmap_handle *h;
	int pins;

	nvmap_ref_lock(client);

	ref = _nvmap_validate_id_locked(client, id);
	if (!ref) {
		nvmap_ref_unlock(client);
		return;
	}

	BUG_ON(!ref->handle);
	h = ref->handle;

	if (atomic_dec_return(&ref->dupes)) {
		nvmap_ref_unlock(client);
		goto out;
	}

	smp_rmb();
	pins = atomic_read(&ref->pin);
	rb_erase(&ref->node, &client->handle_refs);

	if (h->alloc && h->heap_pgalloc && !h->pgalloc.contig)
		atomic_sub(h->size, &client->iovm_commit);

	if (h->alloc && !h->heap_pgalloc) {
		mutex_lock(&h->lock);
		nvmap_carveout_commit_subtract(client,
			nvmap_heap_to_arg(nvmap_block_to_heap(h->carveout)),
			h->size);
		mutex_unlock(&h->lock);
	}

	nvmap_ref_unlock(client);

	if (pins)
		nvmap_err(client, "%s freeing pinned handle %p\n",
			  current->group_leader->comm, h);

	while (pins--)
		nvmap_unpin_handles(client, &ref->handle, 1);

	if (h->owner == client)
		h->owner = NULL;

	kfree(ref);

out:
	BUG_ON(!atomic_read(&h->ref));
	nvmap_handle_put(h);
}

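/* Insert a new reference into the client's rbtree of handle refs,
 * ordered by handle pointer. */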
static void add_handle_ref(struct nvmap_client *client,
			   struct nvmap_handle_ref *ref)
{
	struct rb_node **p, *parent = NULL;

	nvmap_ref_lock(client);
	p = &client->handle_refs.rb_node;
	while (*p) {
		struct nvmap_handle_ref *node;
		parent = *p;
		node = rb_entry(parent, struct nvmap_handle_ref, node);
		if (ref->handle > node->handle)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&ref->node, parent, p);
	rb_insert_color(&ref->node, &client->handle_refs);
	nvmap_ref_unlock(client);
}

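/* Create a new, unallocated handle of the requested size and add a
 * reference to it for this client; backing memory is not allocated
 * until nvmap_alloc_handle_id() is called on the handle. */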
struct nvmap_handle_ref *nvmap_create_handle(struct nvmap_client *client,
					     size_t size)
{
	struct nvmap_handle *h;
	struct nvmap_handle_ref *ref = NULL;

	if (!client)
		return ERR_PTR(-EINVAL);

	if (!size)
		return ERR_PTR(-EINVAL);

	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return ERR_PTR(-ENOMEM);

	ref = kzalloc(sizeof(*ref), GFP_KERNEL);
	if (!ref) {
		kfree(h);
		return ERR_PTR(-ENOMEM);
	}

	atomic_set(&h->ref, 1);
	atomic_set(&h->pin, 0);
	h->owner = client;
	h->dev = client->dev;
	BUG_ON(!h->owner);
	h->size = h->orig_size = size;
	h->flags = NVMAP_HANDLE_WRITE_COMBINE;
	mutex_init(&h->lock);

	nvmap_handle_add(client->dev, h);

	atomic_set(&ref->dupes, 1);
	ref->handle = h;
	atomic_set(&ref->pin, 0);
	add_handle_ref(client, ref);
	return ref;
}

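/* Duplicate an existing, allocated handle into this client: if the
 * client already holds a reference, just bump its dupe count;
 * otherwise charge the client's IOVM or carveout commit accounting and
 * add a new reference to the client's rbtree. */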
struct nvmap_handle_ref *nvmap_duplicate_handle_id(struct nvmap_client *client,
						   unsigned long id)
{
	struct nvmap_handle_ref *ref = NULL;
	struct nvmap_handle *h = NULL;

	BUG_ON(!client || client->dev != nvmap_dev);
	/* on success, the reference count for the handle should be
	 * incremented, so the success paths will not call nvmap_handle_put */
	h = nvmap_validate_get(client, id);

	if (!h) {
		nvmap_debug(client, "%s duplicate handle failed\n",
			    current->group_leader->comm);
		return ERR_PTR(-EPERM);
	}

	if (!h->alloc) {
		nvmap_err(client, "%s duplicating unallocated handle\n",
			  current->group_leader->comm);
		nvmap_handle_put(h);
		return ERR_PTR(-EINVAL);
	}

	nvmap_ref_lock(client);
	ref = _nvmap_validate_id_locked(client, (unsigned long)h);

	if (ref) {
		/* handle already duplicated in client; just increment
		 * the reference count rather than re-duplicating it */
		atomic_inc(&ref->dupes);
		nvmap_ref_unlock(client);
		return ref;
	}

	nvmap_ref_unlock(client);

	/* verify that adding this handle to the process' access list
	 * won't exceed the IOVM limit */
	if (h->heap_pgalloc && !h->pgalloc.contig) {
		int oc;
		oc = atomic_add_return(h->size, &client->iovm_commit);
		if (oc > client->iovm_limit && !client->super) {
			atomic_sub(h->size, &client->iovm_commit);
			nvmap_handle_put(h);
			nvmap_err(client, "duplicating %p in %s over-commits"
				  " IOVMM space\n", (void *)id,
				  current->group_leader->comm);
			return ERR_PTR(-ENOMEM);
		}
	}

	ref = kzalloc(sizeof(*ref), GFP_KERNEL);
	if (!ref) {
		nvmap_handle_put(h);
		return ERR_PTR(-ENOMEM);
	}

	if (!h->heap_pgalloc) {
		mutex_lock(&h->lock);
		nvmap_carveout_commit_add(client,
			nvmap_heap_to_arg(nvmap_block_to_heap(h->carveout)),
			h->size);
		mutex_unlock(&h->lock);
	}

	atomic_set(&ref->dupes, 1);
	ref->handle = h;
	atomic_set(&ref->pin, 0);
	add_handle_ref(client, ref);
	return ref;
}