/*
 * drivers/video/tegra/nvmap/nvmap.c
 *
 * Memory manager for Tegra GPU
 *
 * Copyright (c) 2009-2012, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/rbtree.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/slab.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include <mach/iovmm.h>
#include <linux/nvmap.h>

#include "nvmap.h"
#include "nvmap_mru.h"

/* private nvmap_handle flag for pinning duplicate detection */
#define NVMAP_HANDLE_VISITED (0x1ul << 31)

/* map the backing pages for a heap_pgalloc handle into its IOVMM area */
static void map_iovmm_area(struct nvmap_handle *h)
{
        tegra_iovmm_addr_t va;
        unsigned long i;

        BUG_ON(!h->heap_pgalloc || !h->pgalloc.area);
        BUG_ON(h->size & ~PAGE_MASK);
        WARN_ON(!h->pgalloc.dirty);

        for (va = h->pgalloc.area->iovm_start, i = 0;
             va < (h->pgalloc.area->iovm_start + h->size);
             i++, va += PAGE_SIZE) {
                unsigned long pfn;

                pfn = page_to_pfn(h->pgalloc.pages[i]);
                BUG_ON(!pfn_valid(pfn));
                tegra_iovmm_vm_insert_pfn(h->pgalloc.area, va, pfn);
        }
        h->pgalloc.dirty = false;
}

/* must be called inside nvmap_pin_lock, to ensure that an entire stream
 * of pins will complete without racing with a second stream. handle should
 * have nvmap_handle_get (or nvmap_validate_get) called before calling
 * this function. */
static int pin_locked(struct nvmap_client *client, struct nvmap_handle *h)
{
        struct tegra_iovmm_area *area;
        BUG_ON(!h->alloc);

        nvmap_mru_lock(client->share);
        if (atomic_inc_return(&h->pin) == 1) {
                if (h->heap_pgalloc && !h->pgalloc.contig) {
                        area = nvmap_handle_iovmm_locked(client, h);
                        if (!area) {
                                /* no race here, inside the pin mutex */
                                atomic_dec(&h->pin);
                                nvmap_mru_unlock(client->share);
                                return -ENOMEM;
                        }
                        if (area != h->pgalloc.area)
                                h->pgalloc.dirty = true;
                        h->pgalloc.area = area;
                }
        }
        nvmap_mru_unlock(client->share);
        return 0;
}

/* doesn't need to be called inside nvmap_pin_lock, since this will only
 * expand the available VM area */
static int handle_unpin(struct nvmap_client *client,
                struct nvmap_handle *h, int free_vm)
{
        int ret = 0;
        nvmap_mru_lock(client->share);

        if (atomic_read(&h->pin) == 0) {
                nvmap_err(client, "%s unpinning unpinned handle %p\n",
                          current->group_leader->comm, h);
                nvmap_mru_unlock(client->share);
                return 0;
        }

        BUG_ON(!h->alloc);

        if (!atomic_dec_return(&h->pin)) {
                if (h->heap_pgalloc && h->pgalloc.area) {
                        /* if a secure handle is clean (i.e., mapped into
                         * IOVMM), it needs to be zapped on unpin. */
                        if (h->secure && !h->pgalloc.dirty) {
                                tegra_iovmm_zap_vm(h->pgalloc.area);
                                h->pgalloc.dirty = true;
                        }
                        if (free_vm) {
                                tegra_iovmm_free_vm(h->pgalloc.area);
                                h->pgalloc.area = NULL;
                        } else
                                nvmap_mru_insert_locked(client->share, h);
                        ret = 1;
                }
        }

        nvmap_mru_unlock(client->share);
        nvmap_handle_put(h);
        return ret;
}

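/* pins each handle in the array; if any pin fails, all handles pinned by
 * this call are unpinned again and their VM areas are freed. if the freed
 * IOVM space is at least the client's iovm_limit, the whole array is
 * pinned a second time; that retry is expected to succeed, and the kernel
 * BUGs if it does not. must be called with nvmap_pin_lock held. */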
static int pin_array_locked(struct nvmap_client *client,
                struct nvmap_handle **h, int count)
{
        int pinned;
        int i;
        int err = 0;

        for (pinned = 0; pinned < count; pinned++) {
                err = pin_locked(client, h[pinned]);
                if (err)
                        break;
        }

        if (err) {
                /* unpin pinned handles */
                for (i = 0; i < pinned; i++) {
                        /* inc ref counter, because
                         * handle_unpin decrements it */
                        nvmap_handle_get(h[i]);
                        /* unpin handles and free vm */
                        handle_unpin(client, h[i], true);
                }
        }

        if (err && tegra_iovmm_get_max_free(client->share->iovmm) >=
                                                        client->iovm_limit) {
                /* The first attempt to pin into an empty iovmm may still
                 * fail because of fragmentation caused by placing handles
                 * in MRU areas. After such a failure, all MRU areas are
                 * cleaned and the iovm space is freed.
                 *
                 * We have to retry the pinning here, since there may be no
                 * more incoming pin_wait wakeup calls from unpin
                 * operations. */
                for (pinned = 0; pinned < count; pinned++) {
                        err = pin_locked(client, h[pinned]);
                        if (err)
                                break;
                }
                if (err) {
                        pr_err("Pinning in empty iovmm failed!!!\n");
                        BUG_ON(1);
                }
        }
        return err;
}

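/* attempts to pin the whole array; if that fails, sleeps on the share's
 * pin_wait queue (interruptibly) until a retry succeeds. returns 0 on
 * success or -EINTR if the wait was interrupted by a signal. must be
 * called with nvmap_pin_lock held. */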
static int wait_pin_array_locked(struct nvmap_client *client,
                struct nvmap_handle **h, int count)
{
        int ret = 0;

        ret = pin_array_locked(client, h, count);

        if (ret) {
                ret = wait_event_interruptible(client->share->pin_wait,
                                !pin_array_locked(client, h, count));
        }
        return ret ? -EINTR : 0;
}

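/* super-client path for unpinning a handle by id when the caller holds no
 * handle_ref: validates the id (taking a temporary reference), warns, and
 * unpins the handle. returns nonzero if a pin_wait wakeup is needed. */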
static int handle_unpin_noref(struct nvmap_client *client, unsigned long id)
{
        struct nvmap_handle *h;
        int w;

        h = nvmap_validate_get(client, id);
        if (unlikely(!h)) {
                nvmap_err(client, "%s attempting to unpin invalid handle %p\n",
                          current->group_leader->comm, (void *)id);
                return 0;
        }

        nvmap_err(client, "%s unpinning unreferenced handle %p\n",
                  current->group_leader->comm, h);
        WARN_ON(1);

        w = handle_unpin(client, h, false);
        nvmap_handle_put(h);
        return w;
}

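/* unpins an array of handle ids: for each id with a valid handle_ref, the
 * ref's pin count is decremented (refusing to go below zero) and the
 * underlying handle is unpinned; super clients may also unpin ids they
 * hold no reference to. wakes pin_wait waiters if any unpin released
 * IOVM space. */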
void nvmap_unpin_ids(struct nvmap_client *client,
                     unsigned int nr, const unsigned long *ids)
{
        unsigned int i;
        int do_wake = 0;

        for (i = 0; i < nr; i++) {
                struct nvmap_handle_ref *ref;

                if (!ids[i])
                        continue;

                nvmap_ref_lock(client);
                ref = _nvmap_validate_id_locked(client, ids[i]);
                if (ref) {
                        struct nvmap_handle *h = ref->handle;
                        int e = atomic_add_unless(&ref->pin, -1, 0);

                        nvmap_ref_unlock(client);

                        if (!e) {
                                nvmap_err(client, "%s unpinning unpinned "
                                          "handle %08lx\n",
                                          current->group_leader->comm, ids[i]);
                        } else {
                                do_wake |= handle_unpin(client, h, false);
                        }
                } else {
                        nvmap_ref_unlock(client);
                        if (client->super)
                                do_wake |= handle_unpin_noref(client, ids[i]);
                        else
                                nvmap_err(client, "%s unpinning invalid "
                                          "handle %08lx\n",
                                          current->group_leader->comm, ids[i]);
                }
        }

        if (do_wake)
                wake_up(&client->share->pin_wait);
}

/* pins a list of handle_ref objects; same conditions apply as to
 * _nvmap_handle_pin, but also bumps the pin count of each handle_ref. */
int nvmap_pin_ids(struct nvmap_client *client,
                  unsigned int nr, const unsigned long *ids)
{
        int ret = 0;
        unsigned int i;
        struct nvmap_handle **h = (struct nvmap_handle **)ids;
        struct nvmap_handle_ref *ref;

        /* to optimize for the common case (client provided valid handle
         * references and the pin succeeds), increment the handle_ref pin
         * count during validation. in error cases, the tree will need to
         * be re-walked, since the handle_ref is discarded so that an
         * allocation isn't required. if a handle_ref is not found,
         * locally validate that the caller has permission to pin the handle;
         * handle_refs are not created in this case, so it is possible that
         * if the caller crashes after pinning a global handle, the handle
         * will be permanently leaked. */
        nvmap_ref_lock(client);
        for (i = 0; i < nr; i++) {
                ref = _nvmap_validate_id_locked(client, ids[i]);
                if (ref) {
                        atomic_inc(&ref->pin);
                        nvmap_handle_get(h[i]);
                } else {
                        struct nvmap_handle *verify;
                        nvmap_ref_unlock(client);
                        verify = nvmap_validate_get(client, ids[i]);
                        if (verify) {
                                nvmap_warn(client, "%s pinning unreferenced "
                                           "handle %p\n",
                                           current->group_leader->comm, h[i]);
                        } else {
                                ret = -EPERM;
                                nr = i;
                                break;
                        }
                        nvmap_ref_lock(client);
                }
                if (!h[i]->alloc) {
                        ret = -EFAULT;
                        nr = i + 1;
                        break;
                }
        }
        nvmap_ref_unlock(client);

        if (ret)
                goto out;

        ret = mutex_lock_interruptible(&client->share->pin_lock);
        if (WARN_ON(ret))
                goto out;

        ret = wait_pin_array_locked(client, h, nr);

        mutex_unlock(&client->share->pin_lock);

        if (ret) {
                ret = -EINTR;
        } else {
                for (i = 0; i < nr; i++) {
                        if (h[i]->heap_pgalloc && h[i]->pgalloc.dirty)
                                map_iovmm_area(h[i]);
                }
        }

out:
        if (ret) {
                nvmap_ref_lock(client);
                for (i = 0; i < nr; i++) {
                        if (!ids[i])
                                continue;

                        ref = _nvmap_validate_id_locked(client, ids[i]);
                        if (!ref) {
                                nvmap_warn(client, "%s freed handle %p "
                                           "during pinning\n",
                                           current->group_leader->comm,
                                           (void *)ids[i]);
                                continue;
                        }
                        atomic_dec(&ref->pin);
                }
                nvmap_ref_unlock(client);

                for (i = 0; i < nr; i++)
                        if (h[i])
                                nvmap_handle_put(h[i]);
        }

        return ret;
}

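/* returns the device-visible base address of an allocated handle: the
 * physical address of the first page for contiguous page allocations, the
 * IOVM start address for non-contiguous page allocations (which must
 * already have an IOVMM area), or the carveout block base otherwise. */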
static phys_addr_t handle_phys(struct nvmap_handle *h)
{
        phys_addr_t addr;

        if (h->heap_pgalloc && h->pgalloc.contig) {
                addr = page_to_phys(h->pgalloc.pages[0]);
        } else if (h->heap_pgalloc) {
                BUG_ON(!h->pgalloc.area);
                addr = h->pgalloc.area->iovm_start;
        } else {
                addr = h->carveout->base;
        }

        return addr;
}

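/* pins the handle behind a handle_ref and returns its device-visible
 * address; bumps the handle_ref pin count and takes a handle reference
 * that is dropped again on failure. returns a negative errno (as a
 * phys_addr_t) if the handle is invalid or the pin is interrupted. */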
phys_addr_t nvmap_pin(struct nvmap_client *client,
                        struct nvmap_handle_ref *ref)
{
        struct nvmap_handle *h;
        phys_addr_t phys;
        int ret = 0;
        unsigned long ref_id;

        if (!ref)
                return -EINVAL;
        ref_id = nvmap_ref_to_id(ref);
        h = nvmap_get_handle_id(client, ref_id);
        if (WARN_ON(!h))
                return -EINVAL;

        atomic_inc(&ref->pin);

        if (WARN_ON(mutex_lock_interruptible(&client->share->pin_lock))) {
                ret = -EINTR;
        } else {
                ret = wait_pin_array_locked(client, &h, 1);
                mutex_unlock(&client->share->pin_lock);
        }

        if (ret) {
                atomic_dec(&ref->pin);
                nvmap_handle_put(h);
        } else {
                if (h->heap_pgalloc && h->pgalloc.dirty)
                        map_iovmm_area(h);
                phys = handle_phys(h);
        }

        return ret ?: phys;
}

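/* looks up a handle by id and returns its device-visible address, or
 * -EPERM if the id is not valid for this client. */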
phys_addr_t nvmap_handle_address(struct nvmap_client *c, unsigned long id)
{
        struct nvmap_handle *h;
        phys_addr_t phys;

        h = nvmap_get_handle_id(c, id);
        if (!h)
                return -EPERM;
        mutex_lock(&h->lock);
        phys = handle_phys(h);
        mutex_unlock(&h->lock);
        nvmap_handle_put(h);

        return phys;
}

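/* unpins a handle previously pinned with nvmap_pin: decrements the
 * handle_ref pin count, unpins the underlying handle, and wakes pin_wait
 * waiters if IOVM space was released. */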
void nvmap_unpin(struct nvmap_client *client, struct nvmap_handle_ref *ref)
{
        if (!ref)
                return;

        atomic_dec(&ref->pin);
        if (handle_unpin(client, ref->handle, false))
                wake_up(&client->share->pin_wait);
}

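/* unpins an array of handles, waking pin_wait waiters if any unpin
 * released IOVM space. */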
void nvmap_unpin_handles(struct nvmap_client *client,
                         struct nvmap_handle **h, int nr)
{
        int i;
        int do_wake = 0;

        for (i = 0; i < nr; i++) {
                if (WARN_ON(!h[i]))
                        continue;
                do_wake |= handle_unpin(client, h[i], false);
        }

        if (do_wake)
                wake_up(&client->share->pin_wait);
}

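/* maps a handle into kernel virtual address space and returns the mapping
 * address, or NULL on failure. page-allocated handles are mapped with
 * vm_map_ram; carveout handles get a vmalloc-area mapping of their
 * physical range. takes a handle reference (and, for carveout handles, a
 * usecount) that is released by nvmap_munmap. */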
void *nvmap_mmap(struct nvmap_handle_ref *ref)
{
        struct nvmap_handle *h;
        pgprot_t prot;
        unsigned long adj_size;
        unsigned long offs;
        struct vm_struct *v;
        void *p;

        h = nvmap_handle_get(ref->handle);
        if (!h)
                return NULL;

        prot = nvmap_pgprot(h, pgprot_kernel);

        if (h->heap_pgalloc)
                return vm_map_ram(h->pgalloc.pages, h->size >> PAGE_SHIFT,
                                  -1, prot);

        /* carveout - explicitly map the pfns into a vmalloc area */

        nvmap_usecount_inc(h);

        adj_size = h->carveout->base & ~PAGE_MASK;
        adj_size += h->size;
        adj_size = PAGE_ALIGN(adj_size);

        v = alloc_vm_area(adj_size, NULL);
        if (!v) {
                nvmap_usecount_dec(h);
                nvmap_handle_put(h);
                return NULL;
        }

        p = v->addr + (h->carveout->base & ~PAGE_MASK);

        for (offs = 0; offs < adj_size; offs += PAGE_SIZE) {
                unsigned long addr = (unsigned long) v->addr + offs;
                unsigned int pfn;
                pgd_t *pgd;
                pud_t *pud;
                pmd_t *pmd;
                pte_t *pte;

                pfn = __phys_to_pfn(h->carveout->base + offs);
                pgd = pgd_offset_k(addr);
                pud = pud_alloc(&init_mm, pgd, addr);
                if (!pud)
                        break;
                pmd = pmd_alloc(&init_mm, pud, addr);
                if (!pmd)
                        break;
                pte = pte_alloc_kernel(pmd, addr);
                if (!pte)
                        break;
                set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
                flush_tlb_kernel_page(addr);
        }

        if (offs != adj_size) {
                free_vm_area(v);
                nvmap_usecount_dec(h);
                nvmap_handle_put(h);
                return NULL;
        }

        /* leave the handle ref count incremented by 1, so that the handle
         * will not be freed while the kernel mapping exists.
         * nvmap_handle_put will be called when this address is unmapped
         * via nvmap_munmap. */
        return p;
}

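/* tears down a mapping created by nvmap_mmap and drops the references it
 * took. addr must be the pointer returned by nvmap_mmap for this ref. */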
void nvmap_munmap(struct nvmap_handle_ref *ref, void *addr)
{
        struct nvmap_handle *h;

        if (!ref)
                return;

        h = ref->handle;

        if (h->heap_pgalloc) {
                vm_unmap_ram(addr, h->size >> PAGE_SHIFT);
        } else {
                struct vm_struct *vm;
                addr -= (h->carveout->base & ~PAGE_MASK);
                vm = remove_vm_area(addr);
                BUG_ON(!vm);
                kfree(vm);
                nvmap_usecount_dec(h);
        }
        nvmap_handle_put(h);
}

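/* creates a handle and allocates backing memory for it from heap_mask
 * (defaulting to SYSMEM and the generic carveout when heap_mask is 0);
 * returns the new handle_ref or an ERR_PTR on failure. */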
struct nvmap_handle_ref *nvmap_alloc(struct nvmap_client *client, size_t size,
                                     size_t align, unsigned int flags,
                                     unsigned int heap_mask)
{
        const unsigned int default_heap = (NVMAP_HEAP_SYSMEM |
                                           NVMAP_HEAP_CARVEOUT_GENERIC);
        struct nvmap_handle_ref *r = NULL;
        int err;

        if (heap_mask == 0)
                heap_mask = default_heap;

        r = nvmap_create_handle(client, size);
        if (IS_ERR(r))
                return r;

        err = nvmap_alloc_handle_id(client, nvmap_ref_to_id(r),
                                    heap_mask, align, flags);

        if (err) {
                nvmap_free_handle_id(client, nvmap_ref_to_id(r));
                return ERR_PTR(err);
        }

        return r;
}

/* allocates memory with the specified iovm_start address and pins it. */
struct nvmap_handle_ref *nvmap_alloc_iovm(struct nvmap_client *client,
        size_t size, size_t align, unsigned int flags, unsigned int iovm_start)
{
        int err;
        struct nvmap_handle *h;
        struct nvmap_handle_ref *r;
        const unsigned int default_heap = NVMAP_HEAP_IOVMM;

        /* size needs to be larger than one page; otherwise the heap
         * preference would change to the system heap.
         */
        if (size <= PAGE_SIZE)
                size = PAGE_SIZE << 1;
        r = nvmap_create_handle(client, size);
        if (IS_ERR_OR_NULL(r))
                return r;

        h = r->handle;
        h->pgalloc.iovm_addr = iovm_start;
        err = nvmap_alloc_handle_id(client, nvmap_ref_to_id(r),
                        default_heap, align, flags);
        if (err)
                goto fail;

        err = mutex_lock_interruptible(&client->share->pin_lock);
        if (WARN_ON(err))
                goto fail;
        err = pin_locked(client, h);
        mutex_unlock(&client->share->pin_lock);
        if (err)
                goto fail;
        return r;

fail:
        nvmap_free_handle_id(client, nvmap_ref_to_id(r));
        return ERR_PTR(err);
}

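/* unpins and frees a handle allocated with nvmap_alloc_iovm. */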
void nvmap_free_iovm(struct nvmap_client *client, struct nvmap_handle_ref *r)
{
        unsigned long ref_id = nvmap_ref_to_id(r);

        nvmap_unpin_ids(client, 1, &ref_id);
        nvmap_free_handle_id(client, ref_id);
}

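/* frees a handle_ref created by nvmap_alloc; a NULL ref is ignored. */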
void nvmap_free(struct nvmap_client *client, struct nvmap_handle_ref *r)
{
        if (!r)
                return;

        nvmap_free_handle_id(client, nvmap_ref_to_id(r));
}