/*
 * drivers/video/tegra/nvmap/nvmap.c
 *
 * Memory manager for Tegra GPU
 *
 * Copyright (c) 2009-2012, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/rbtree.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/slab.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include <mach/iovmm.h>
#include <linux/nvmap.h>
#include <trace/events/nvmap.h>

#include "nvmap.h"
#include "nvmap_mru.h"

/* private nvmap_handle flag for pinning duplicate detection */
#define NVMAP_HANDLE_VISITED (0x1ul << 31)

/* map the backing pages for a heap_pgalloc handle into its IOVMM area */
static void map_iovmm_area(struct nvmap_handle *h)
{
        tegra_iovmm_addr_t va;
        unsigned long i;

        BUG_ON(!h->heap_pgalloc || !h->pgalloc.area);
        BUG_ON(h->size & ~PAGE_MASK);
        WARN_ON(!h->pgalloc.dirty);

        for (va = h->pgalloc.area->iovm_start, i = 0;
             va < (h->pgalloc.area->iovm_start + h->size);
             i++, va += PAGE_SIZE) {
                unsigned long pfn;

                pfn = page_to_pfn(h->pgalloc.pages[i]);
                BUG_ON(!pfn_valid(pfn));
                tegra_iovmm_vm_insert_pfn(h->pgalloc.area, va, pfn);
        }
        h->pgalloc.dirty = false;
}

/* must be called inside nvmap_pin_lock, to ensure that an entire stream
 * of pins will complete without racing with a second stream. handle should
 * have nvmap_handle_get (or nvmap_validate_get) called before calling
 * this function. */
static int pin_locked(struct nvmap_client *client, struct nvmap_handle *h)
{
        struct tegra_iovmm_area *area;
        BUG_ON(!h->alloc);

        nvmap_mru_lock(client->share);
        if (atomic_inc_return(&h->pin) == 1) {
                if (h->heap_pgalloc && !h->pgalloc.contig) {
                        area = nvmap_handle_iovmm_locked(client, h);
                        if (!area) {
                                /* no race here, inside the pin mutex */
                                atomic_dec(&h->pin);
                                nvmap_mru_unlock(client->share);
                                return -ENOMEM;
                        }
                        if (area != h->pgalloc.area)
                                h->pgalloc.dirty = true;
                        h->pgalloc.area = area;
                }
        }
        trace_handle_pin(client, h, atomic_read(&h->pin));
        nvmap_mru_unlock(client->share);
        return 0;
}

/* doesn't need to be called inside nvmap_pin_lock, since this will only
 * expand the available VM area */
static int handle_unpin(struct nvmap_client *client,
                struct nvmap_handle *h, int free_vm)
{
        int ret = 0;
        nvmap_mru_lock(client->share);

        if (atomic_read(&h->pin) == 0) {
                trace_handle_unpin_error(client, h, atomic_read(&h->pin));
                nvmap_err(client, "%s unpinning unpinned handle %p\n",
                          current->group_leader->comm, h);
                nvmap_mru_unlock(client->share);
                return 0;
        }

        BUG_ON(!h->alloc);

        if (!atomic_dec_return(&h->pin)) {
                if (h->heap_pgalloc && h->pgalloc.area) {
                        /* if a secure handle is clean (i.e., mapped into
                         * IOVMM), it needs to be zapped on unpin. */
                        if (h->secure && !h->pgalloc.dirty) {
                                tegra_iovmm_zap_vm(h->pgalloc.area);
                                h->pgalloc.dirty = true;
                        }
                        if (free_vm) {
                                tegra_iovmm_free_vm(h->pgalloc.area);
                                h->pgalloc.area = NULL;
                        } else
                                nvmap_mru_insert_locked(client->share, h);
                        ret = 1;
                }
        }

        trace_handle_unpin(client, h, atomic_read(&h->pin));
        nvmap_mru_unlock(client->share);
        nvmap_handle_put(h);
        return ret;
}

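/* pin every handle in the array; on failure, unpin the handles that were
 * already pinned (freeing their vm) and, if the iovmm free space still
 * covers the client's iovm_limit, retry the whole array once (see the
 * comment below). must be called inside nvmap_pin_lock. */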
static int pin_array_locked(struct nvmap_client *client,
                struct nvmap_handle **h, int count)
{
        int pinned;
        int i;
        int err = 0;

        for (pinned = 0; pinned < count; pinned++) {
                err = pin_locked(client, h[pinned]);
                if (err)
                        break;
        }

        if (err) {
                /* unpin pinned handles */
                for (i = 0; i < pinned; i++) {
                        /* inc ref counter, because
                         * handle_unpin decrements it */
                        nvmap_handle_get(h[i]);
                        /* unpin handles and free vm */
                        handle_unpin(client, h[i], true);
                }
        }

        if (err && tegra_iovmm_get_max_free(client->share->iovmm) >=
                                                        client->iovm_limit) {
                /* The first attempt to pin in an empty iovmm may still fail
                 * because of fragmentation caused by placing handles in MRU
                 * areas. After such a failure all MRU areas are cleaned up
                 * and their iovm space is freed.
                 *
                 * We have to do the pinning again here, since there may be
                 * no more incoming pin_wait wakeup calls from unpin
                 * operations. */
                for (pinned = 0; pinned < count; pinned++) {
                        err = pin_locked(client, h[pinned]);
                        if (err)
                                break;
                }
                if (err) {
                        pr_err("Pinning in empty iovmm failed!!!\n");
                        BUG();
                }
        }
        return err;
}

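/* try to pin the whole array; if that fails, sleep on pin_wait until a
 * retry succeeds or the wait is interrupted. returns 0 on success or
 * -EINTR. must be called inside nvmap_pin_lock. */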
static int wait_pin_array_locked(struct nvmap_client *client,
                struct nvmap_handle **h, int count)
{
        int ret = 0;

        ret = pin_array_locked(client, h, count);

        if (ret) {
                ret = wait_event_interruptible(client->share->pin_wait,
                                !pin_array_locked(client, h, count));
        }
        return ret ? -EINTR : 0;
}

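/* unpin a handle for which the client holds no handle_ref; only reached
 * for super clients via nvmap_unpin_ids. returns nonzero if pin_wait
 * waiters should be woken. */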
static int handle_unpin_noref(struct nvmap_client *client, unsigned long id)
{
        struct nvmap_handle *h;
        int w;

        h = nvmap_validate_get(client, id);
        if (unlikely(!h)) {
                nvmap_err(client, "%s attempting to unpin invalid handle %p\n",
                          current->group_leader->comm, (void *)id);
                return 0;
        }

        nvmap_err(client, "%s unpinning unreferenced handle %p\n",
                  current->group_leader->comm, h);
        WARN_ON(1);

        w = handle_unpin(client, h, false);
        nvmap_handle_put(h);
        return w;
}

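/* unpin an array of handle ids, dropping one pin from each handle_ref;
 * wakes pin_wait waiters if any iovm space may have been released. */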
void nvmap_unpin_ids(struct nvmap_client *client,
                     unsigned int nr, const unsigned long *ids)
{
        unsigned int i;
        int do_wake = 0;

        for (i = 0; i < nr; i++) {
                struct nvmap_handle_ref *ref;

                if (!ids[i])
                        continue;

                nvmap_ref_lock(client);
                ref = _nvmap_validate_id_locked(client, ids[i]);
                if (ref) {
                        struct nvmap_handle *h = ref->handle;
                        int e = atomic_add_unless(&ref->pin, -1, 0);

                        nvmap_ref_unlock(client);

                        if (!e) {
                                nvmap_err(client, "%s unpinning unpinned "
                                          "handle %08lx\n",
                                          current->group_leader->comm, ids[i]);
                        } else {
                                do_wake |= handle_unpin(client, h, false);
                        }
                } else {
                        nvmap_ref_unlock(client);
                        if (client->super)
                                do_wake |= handle_unpin_noref(client, ids[i]);
                        else
                                nvmap_err(client, "%s unpinning invalid "
                                          "handle %08lx\n",
                                          current->group_leader->comm, ids[i]);
                }
        }

        if (do_wake)
                wake_up(&client->share->pin_wait);
}

/* pins a list of handle_ref objects; same conditions apply as to
 * _nvmap_handle_pin, but also bumps the pin count of each handle_ref. */
int nvmap_pin_ids(struct nvmap_client *client,
                  unsigned int nr, const unsigned long *ids)
{
        int ret = 0;
        unsigned int i;
        struct nvmap_handle **h = (struct nvmap_handle **)ids;
        struct nvmap_handle_ref *ref;

        /* to optimize for the common case (client provided valid handle
         * references and the pin succeeds), increment the handle_ref pin
         * count during validation. in error cases, the tree will need to
         * be re-walked, since the handle_ref is discarded so that an
         * allocation isn't required. if a handle_ref is not found,
         * locally validate that the caller has permission to pin the handle;
         * handle_refs are not created in this case, so it is possible that
         * if the caller crashes after pinning a global handle, the handle
         * will be permanently leaked. */
        nvmap_ref_lock(client);
        for (i = 0; i < nr; i++) {
                ref = _nvmap_validate_id_locked(client, ids[i]);
                if (ref) {
                        atomic_inc(&ref->pin);
                        nvmap_handle_get(h[i]);
                } else {
                        struct nvmap_handle *verify;
                        nvmap_ref_unlock(client);
                        verify = nvmap_validate_get(client, ids[i]);
                        if (verify) {
                                nvmap_warn(client, "%s pinning unreferenced "
                                           "handle %p\n",
                                           current->group_leader->comm, h[i]);
                        } else {
                                ret = -EPERM;
                                nr = i;
                                break;
                        }
                        nvmap_ref_lock(client);
                }
                if (!h[i]->alloc) {
                        ret = -EFAULT;
                        nr = i + 1;
                        break;
                }
        }
        nvmap_ref_unlock(client);

        if (ret)
                goto out;

        ret = mutex_lock_interruptible(&client->share->pin_lock);
        if (WARN_ON(ret))
                goto out;

        ret = wait_pin_array_locked(client, h, nr);

        mutex_unlock(&client->share->pin_lock);

        if (ret) {
                ret = -EINTR;
        } else {
                for (i = 0; i < nr; i++) {
                        if (h[i]->heap_pgalloc && h[i]->pgalloc.dirty)
                                map_iovmm_area(h[i]);
                }
        }

out:
        if (ret) {
                nvmap_ref_lock(client);
                for (i = 0; i < nr; i++) {
                        if (!ids[i])
                                continue;

                        ref = _nvmap_validate_id_locked(client, ids[i]);
                        if (!ref) {
                                nvmap_warn(client, "%s freed handle %p "
                                           "during pinning\n",
                                           current->group_leader->comm,
                                           (void *)ids[i]);
                                continue;
                        }
                        atomic_dec(&ref->pin);
                }
                nvmap_ref_unlock(client);

                for (i = 0; i < nr; i++)
                        if (h[i])
                                nvmap_handle_put(h[i]);
        }

        return ret;
}

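/* return the physical (or, for non-contiguous page allocations, iovm)
 * address of a handle's backing memory; iovm-backed handles must already
 * have an iovm area assigned. */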
static phys_addr_t handle_phys(struct nvmap_handle *h)
{
        phys_addr_t addr;

        if (h->heap_pgalloc && h->pgalloc.contig) {
                addr = page_to_phys(h->pgalloc.pages[0]);
        } else if (h->heap_pgalloc) {
                BUG_ON(!h->pgalloc.area);
                addr = h->pgalloc.area->iovm_start;
        } else {
                addr = h->carveout->base;
        }

        return addr;
}

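/* pin the handle behind ref and return the address at which it is mapped;
 * returns -EINVAL or -EINTR (as a phys_addr_t) on failure. */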
phys_addr_t nvmap_pin(struct nvmap_client *client,
                        struct nvmap_handle_ref *ref)
{
        struct nvmap_handle *h;
        phys_addr_t phys;
        int ret = 0;
        unsigned long ref_id;

        if (!ref)
                return -EINVAL;
        ref_id = nvmap_ref_to_id(ref);
        h = nvmap_get_handle_id(client, ref_id);
        if (WARN_ON(!h))
                return -EINVAL;

        atomic_inc(&ref->pin);

        if (WARN_ON(mutex_lock_interruptible(&client->share->pin_lock))) {
                ret = -EINTR;
        } else {
                ret = wait_pin_array_locked(client, &h, 1);
                mutex_unlock(&client->share->pin_lock);
        }

        if (ret) {
                atomic_dec(&ref->pin);
                nvmap_handle_put(h);
        } else {
                if (h->heap_pgalloc && h->pgalloc.dirty)
                        map_iovmm_area(h);
                phys = handle_phys(h);
        }

        return ret ?: phys;
}

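/* look up a handle by id and return the address of its backing memory,
 * or -EPERM if the id is not valid for this client. */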
phys_addr_t nvmap_handle_address(struct nvmap_client *c, unsigned long id)
{
        struct nvmap_handle *h;
        phys_addr_t phys;

        h = nvmap_get_handle_id(c, id);
        if (!h)
                return -EPERM;
        mutex_lock(&h->lock);
        phys = handle_phys(h);
        mutex_unlock(&h->lock);
        nvmap_handle_put(h);

        return phys;
}

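/* drop one pin from ref and its handle; wakes pin_wait waiters if iovm
 * space was released. */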
void nvmap_unpin(struct nvmap_client *client, struct nvmap_handle_ref *ref)
{
        if (!ref)
                return;

        atomic_dec(&ref->pin);
        if (handle_unpin(client, ref->handle, false))
                wake_up(&client->share->pin_wait);
}

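/* unpin an array of handles, waking pin_wait waiters once at the end if
 * any iovm space was released. */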
void nvmap_unpin_handles(struct nvmap_client *client,
                         struct nvmap_handle **h, int nr)
{
        int i;
        int do_wake = 0;

        for (i = 0; i < nr; i++) {
                if (WARN_ON(!h[i]))
                        continue;
                do_wake |= handle_unpin(client, h[i], false);
        }

        if (do_wake)
                wake_up(&client->share->pin_wait);
}

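/* map a single page of a handle into the kernel through a temporary pte;
 * takes a reference on the handle, which is dropped by nvmap_kunmap. */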
void *nvmap_kmap(struct nvmap_handle_ref *ref, unsigned int pagenum)
{
        struct nvmap_handle *h;
        unsigned long paddr;
        unsigned long kaddr;
        pgprot_t prot;
        pte_t **pte;

        BUG_ON(!ref);
        h = nvmap_handle_get(ref->handle);
        if (!h)
                return NULL;

        BUG_ON(pagenum >= h->size >> PAGE_SHIFT);
        prot = nvmap_pgprot(h, pgprot_kernel);
        pte = nvmap_alloc_pte(nvmap_dev, (void **)&kaddr);
        if (!pte)
                goto out;

        if (h->heap_pgalloc)
                paddr = page_to_phys(h->pgalloc.pages[pagenum]);
        else
                paddr = h->carveout->base + pagenum * PAGE_SIZE;

        set_pte_at(&init_mm, kaddr, *pte,
                                pfn_pte(__phys_to_pfn(paddr), prot));
        flush_tlb_kernel_page(kaddr);
        return (void *)kaddr;
out:
        nvmap_handle_put(ref->handle);
        return NULL;
}

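/* undo a mapping created by nvmap_kmap: flush the page for cacheable
 * handles, release the temporary pte and drop the handle reference. */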
void nvmap_kunmap(struct nvmap_handle_ref *ref, unsigned int pagenum,
                  void *addr)
{
        struct nvmap_handle *h;
        unsigned long paddr;
        pte_t **pte;

        BUG_ON(!addr || !ref);
        h = ref->handle;

        if (h->heap_pgalloc)
                paddr = page_to_phys(h->pgalloc.pages[pagenum]);
        else
                paddr = h->carveout->base + pagenum * PAGE_SIZE;

        if (h->flags != NVMAP_HANDLE_UNCACHEABLE &&
            h->flags != NVMAP_HANDLE_WRITE_COMBINE) {
                dmac_flush_range(addr, addr + PAGE_SIZE);
                outer_flush_range(paddr, paddr + PAGE_SIZE);
        }

        pte = nvmap_vaddr_to_pte(nvmap_dev, (unsigned long)addr);
        nvmap_free_pte(nvmap_dev, pte);
        nvmap_handle_put(h);
}

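/* map an entire handle into kernel virtual memory: page-allocated handles
 * are mapped with vm_map_ram, carveout handles are mapped pte-by-pte into
 * a fresh vm area. the returned pointer is released by nvmap_munmap. */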
void *nvmap_mmap(struct nvmap_handle_ref *ref)
{
        struct nvmap_handle *h;
        pgprot_t prot;
        unsigned long adj_size;
        unsigned long offs;
        struct vm_struct *v;
        void *p;

        h = nvmap_handle_get(ref->handle);
        if (!h)
                return NULL;

        prot = nvmap_pgprot(h, pgprot_kernel);

        if (h->heap_pgalloc)
                return vm_map_ram(h->pgalloc.pages, h->size >> PAGE_SHIFT,
                                  -1, prot);

        /* carveout - explicitly map the pfns into a vmalloc area */

        nvmap_usecount_inc(h);

        adj_size = h->carveout->base & ~PAGE_MASK;
        adj_size += h->size;
        adj_size = PAGE_ALIGN(adj_size);

        v = alloc_vm_area(adj_size, NULL);
        if (!v) {
                nvmap_usecount_dec(h);
                nvmap_handle_put(h);
                return NULL;
        }

        p = v->addr + (h->carveout->base & ~PAGE_MASK);

        for (offs = 0; offs < adj_size; offs += PAGE_SIZE) {
                unsigned long addr = (unsigned long) v->addr + offs;
                unsigned int pfn;
                pgd_t *pgd;
                pud_t *pud;
                pmd_t *pmd;
                pte_t *pte;

                pfn = __phys_to_pfn(h->carveout->base + offs);
                pgd = pgd_offset_k(addr);
                pud = pud_alloc(&init_mm, pgd, addr);
                if (!pud)
                        break;
                pmd = pmd_alloc(&init_mm, pud, addr);
                if (!pmd)
                        break;
                pte = pte_alloc_kernel(pmd, addr);
                if (!pte)
                        break;
                set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
                flush_tlb_kernel_page(addr);
        }

        if (offs != adj_size) {
                free_vm_area(v);
                nvmap_usecount_dec(h);
                nvmap_handle_put(h);
                return NULL;
        }

        /* leave the handle ref count incremented by 1, so that
         * the handle will not be freed while the kernel mapping exists.
         * nvmap_handle_put will be called by unmapping this address */
        return p;
}

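/* tear down a mapping created by nvmap_mmap and drop the handle reference
 * taken at map time. */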
void nvmap_munmap(struct nvmap_handle_ref *ref, void *addr)
{
        struct nvmap_handle *h;

        if (!ref)
                return;

        h = ref->handle;

        if (h->heap_pgalloc) {
                vm_unmap_ram(addr, h->size >> PAGE_SHIFT);
        } else {
                struct vm_struct *vm;
                addr -= (h->carveout->base & ~PAGE_MASK);
                vm = remove_vm_area(addr);
                BUG_ON(!vm);
                kfree(vm);
                nvmap_usecount_dec(h);
        }
        nvmap_handle_put(h);
}

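/* create a handle of the given size and allocate backing memory for it
 * from heap_mask (sysmem/generic carveout when no mask is given); returns
 * the new handle_ref or an ERR_PTR. */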
struct nvmap_handle_ref *nvmap_alloc(struct nvmap_client *client, size_t size,
                                     size_t align, unsigned int flags,
                                     unsigned int heap_mask)
{
        const unsigned int default_heap = (NVMAP_HEAP_SYSMEM |
                                           NVMAP_HEAP_CARVEOUT_GENERIC);
        struct nvmap_handle_ref *r = NULL;
        int err;

        if (heap_mask == 0)
                heap_mask = default_heap;

        r = nvmap_create_handle(client, size);
        if (IS_ERR(r))
                return r;

        err = nvmap_alloc_handle_id(client, nvmap_ref_to_id(r),
                                    heap_mask, align, flags);

        if (err) {
                nvmap_free_handle_id(client, nvmap_ref_to_id(r));
                return ERR_PTR(err);
        }

        return r;
}

/* allocates memory with the specified iovm_start address. */
struct nvmap_handle_ref *nvmap_alloc_iovm(struct nvmap_client *client,
        size_t size, size_t align, unsigned int flags, unsigned int iovm_start)
{
        int err;
        struct nvmap_handle *h;
        struct nvmap_handle_ref *r;
        const unsigned int default_heap = NVMAP_HEAP_IOVMM;

        /* size needs to be more than one page;
         * otherwise the heap preference would change to the system heap.
         */
        if (size <= PAGE_SIZE)
                size = PAGE_SIZE << 1;
        r = nvmap_create_handle(client, size);
        if (IS_ERR_OR_NULL(r))
                return r;

        h = r->handle;
        h->pgalloc.iovm_addr = iovm_start;
        err = nvmap_alloc_handle_id(client, nvmap_ref_to_id(r),
                        default_heap, align, flags);
        if (err)
                goto fail;

        err = mutex_lock_interruptible(&client->share->pin_lock);
        if (WARN_ON(err))
                goto fail;
        err = pin_locked(client, h);
        mutex_unlock(&client->share->pin_lock);
        if (err)
                goto fail;
        return r;

fail:
        nvmap_free_handle_id(client, nvmap_ref_to_id(r));
        return ERR_PTR(err);
}

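/* unpin and free a handle allocated by nvmap_alloc_iovm. */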
void nvmap_free_iovm(struct nvmap_client *client, struct nvmap_handle_ref *r)
{
        unsigned long ref_id = nvmap_ref_to_id(r);

        nvmap_unpin_ids(client, 1, &ref_id);
        nvmap_free_handle_id(client, ref_id);
}

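/* release a handle_ref obtained from nvmap_alloc. */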
void nvmap_free(struct nvmap_client *client, struct nvmap_handle_ref *r)
{
        if (!r)
                return;

        nvmap_free_handle_id(client, nvmap_ref_to_id(r));
}