video: tegra: nvmap: implement fast api functions
/*
 * drivers/video/tegra/nvmap/nvmap.c
 *
 * Memory manager for Tegra GPU
 *
 * Copyright (c) 2009-2012, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/rbtree.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/slab.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include <mach/iovmm.h>
#include <linux/nvmap.h>
#include <trace/events/nvmap.h>

#include "nvmap.h"
#include "nvmap_mru.h"

/* private nvmap_handle flag for pinning duplicate detection */
#define NVMAP_HANDLE_VISITED (0x1ul << 31)

/* map the backing pages for a heap_pgalloc handle into its IOVMM area */
static void map_iovmm_area(struct nvmap_handle *h)
{
        tegra_iovmm_addr_t va;
        unsigned long i;

        BUG_ON(!h->heap_pgalloc || !h->pgalloc.area);
        BUG_ON(h->size & ~PAGE_MASK);
        WARN_ON(!h->pgalloc.dirty);

        for (va = h->pgalloc.area->iovm_start, i = 0;
             va < (h->pgalloc.area->iovm_start + h->size);
             i++, va += PAGE_SIZE) {
                unsigned long pfn;

                pfn = page_to_pfn(h->pgalloc.pages[i]);
                BUG_ON(!pfn_valid(pfn));
                tegra_iovmm_vm_insert_pfn(h->pgalloc.area, va, pfn);
        }
        h->pgalloc.dirty = false;
}

/* must be called inside nvmap_pin_lock, to ensure that an entire stream
 * of pins will complete without racing with a second stream. handle should
 * have nvmap_handle_get (or nvmap_validate_get) called before calling
 * this function. */
static int pin_locked(struct nvmap_client *client, struct nvmap_handle *h)
{
        struct tegra_iovmm_area *area;
        BUG_ON(!h->alloc);
        if (atomic_inc_return(&h->pin) == 1) {
                if (h->heap_pgalloc && !h->pgalloc.contig) {
                        area = nvmap_handle_iovmm_locked(client, h);
                        if (!area) {
                                /* no race here, inside the pin mutex */
                                atomic_dec(&h->pin);
                                return -ENOMEM;
                        }
                        if (area != h->pgalloc.area)
                                h->pgalloc.dirty = true;
                        h->pgalloc.area = area;
                }
        }
        trace_handle_pin(client, h, atomic_read(&h->pin));
        return 0;
}
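
/*
 * Illustrative sketch, not part of the original file: the locking a caller
 * of pin_locked() is expected to provide. It mirrors what pin_array_locked()
 * and nvmap_alloc_iovm() below do; the helper name example_pin_one() is
 * hypothetical, and the caller is assumed to already hold a reference on h
 * (nvmap_handle_get()).
 */
static int __maybe_unused example_pin_one(struct nvmap_client *client,
                                          struct nvmap_handle *h)
{
        int err;

        /* serialize this pin stream against others */
        if (mutex_lock_interruptible(&client->share->pin_lock))
                return -EINTR;

        /* pin_locked() is called with the MRU lock held as well */
        nvmap_mru_lock(client->share);
        err = pin_locked(client, h);
        nvmap_mru_unlock(client->share);

        mutex_unlock(&client->share->pin_lock);
        return err;
}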

/* doesn't need to be called inside nvmap_pin_lock, since this will only
 * expand the available VM area */
static int handle_unpin(struct nvmap_client *client,
                struct nvmap_handle *h, int free_vm)
{
        int ret = 0;
        nvmap_mru_lock(client->share);

        if (atomic_read(&h->pin) == 0) {
                trace_handle_unpin_error(client, h, atomic_read(&h->pin));
                nvmap_err(client, "%s unpinning unpinned handle %p\n",
                          current->group_leader->comm, h);
                nvmap_mru_unlock(client->share);
                return 0;
        }

        BUG_ON(!h->alloc);

        if (!atomic_dec_return(&h->pin)) {
                if (h->heap_pgalloc && h->pgalloc.area) {
                        /* if a secure handle is clean (i.e., mapped into
                         * IOVMM), it needs to be zapped on unpin. */
                        if (h->secure && !h->pgalloc.dirty) {
                                tegra_iovmm_zap_vm(h->pgalloc.area);
                                h->pgalloc.dirty = true;
                        }
                        if (free_vm) {
                                tegra_iovmm_free_vm(h->pgalloc.area);
                                h->pgalloc.area = NULL;
                        } else
                                nvmap_mru_insert_locked(client->share, h);
                        ret = 1;
                }
        }

        trace_handle_unpin(client, h, atomic_read(&h->pin));
        nvmap_mru_unlock(client->share);
        nvmap_handle_put(h);
        return ret;
}

static int pin_array_locked(struct nvmap_client *client,
                struct nvmap_handle **h, int count)
{
        int pinned;
        int i;
        int err = 0;

        nvmap_mru_lock(client->share);
        for (pinned = 0; pinned < count; pinned++) {
                err = pin_locked(client, h[pinned]);
                if (err)
                        break;
        }
        nvmap_mru_unlock(client->share);

        if (err) {
                /* unpin pinned handles */
                for (i = 0; i < pinned; i++) {
                        /* inc ref counter, because
                         * handle_unpin decrements it */
                        nvmap_handle_get(h[i]);
                        /* unpin handles and free vm */
                        handle_unpin(client, h[i], true);
                }
        }

        if (err && tegra_iovmm_get_max_free(client->share->iovmm) >=
                                                        client->iovm_limit) {
                /* The first attempt to pin in an empty iovmm
                 * may still fail because of fragmentation caused by
                 * placing handles in MRU areas. After such a failure
                 * all MRU areas are cleaned and iovm space is freed.
                 *
                 * We have to retry the pinning here, since there may be
                 * no more incoming pin_wait wakeup calls from unpin
                 * operations. */
                nvmap_mru_lock(client->share);
                for (pinned = 0; pinned < count; pinned++) {
                        err = pin_locked(client, h[pinned]);
                        if (err)
                                break;
                }
                nvmap_mru_unlock(client->share);

                if (err) {
                        pr_err("Pinning in empty iovmm failed!!!\n");
                        BUG_ON(1);
                }
        }
        return err;
}

static int wait_pin_array_locked(struct nvmap_client *client,
                struct nvmap_handle **h, int count)
{
        int ret = 0;

        ret = pin_array_locked(client, h, count);

        if (ret) {
                ret = wait_event_interruptible(client->share->pin_wait,
                                !pin_array_locked(client, h, count));
        }
        return ret ? -EINTR : 0;
}

static int handle_unpin_noref(struct nvmap_client *client, unsigned long id)
{
        struct nvmap_handle *h;
        int w;

        h = nvmap_validate_get(client, id);
        if (unlikely(!h)) {
                nvmap_err(client, "%s attempting to unpin invalid handle %p\n",
                          current->group_leader->comm, (void *)id);
                return 0;
        }

        nvmap_err(client, "%s unpinning unreferenced handle %p\n",
                  current->group_leader->comm, h);
        WARN_ON(1);

        w = handle_unpin(client, h, false);
        nvmap_handle_put(h);
        return w;
}

void nvmap_unpin_ids(struct nvmap_client *client,
                     unsigned int nr, const unsigned long *ids)
{
        unsigned int i;
        int do_wake = 0;

        for (i = 0; i < nr; i++) {
                struct nvmap_handle_ref *ref;

                if (!ids[i])
                        continue;

                nvmap_ref_lock(client);
                ref = _nvmap_validate_id_locked(client, ids[i]);
                if (ref) {
                        struct nvmap_handle *h = ref->handle;
                        int e = atomic_add_unless(&ref->pin, -1, 0);

                        nvmap_ref_unlock(client);

                        if (!e) {
                                nvmap_err(client, "%s unpinning unpinned "
                                          "handle %08lx\n",
                                          current->group_leader->comm, ids[i]);
                        } else {
                                do_wake |= handle_unpin(client, h, false);
                        }
                } else {
                        nvmap_ref_unlock(client);
                        if (client->super)
                                do_wake |= handle_unpin_noref(client, ids[i]);
                        else
                                nvmap_err(client, "%s unpinning invalid "
                                          "handle %08lx\n",
                                          current->group_leader->comm, ids[i]);
                }
        }

        if (do_wake)
                wake_up(&client->share->pin_wait);
}

/* pins a list of handle_ref objects; same conditions apply as to
 * _nvmap_handle_pin, but also bumps the pin count of each handle_ref. */
int nvmap_pin_ids(struct nvmap_client *client,
                  unsigned int nr, const unsigned long *ids)
{
        int ret = 0;
        unsigned int i;
        struct nvmap_handle **h = (struct nvmap_handle **)ids;
        struct nvmap_handle_ref *ref;

        /* to optimize for the common case (client provided valid handle
         * references and the pin succeeds), increment the handle_ref pin
         * count during validation. in error cases, the tree will need to
         * be re-walked, since the handle_ref is discarded so that an
         * allocation isn't required. if a handle_ref is not found,
         * locally validate that the caller has permission to pin the handle;
         * handle_refs are not created in this case, so it is possible that
         * if the caller crashes after pinning a global handle, the handle
         * will be permanently leaked. */
        nvmap_ref_lock(client);
        for (i = 0; i < nr; i++) {
                ref = _nvmap_validate_id_locked(client, ids[i]);
                if (ref) {
                        atomic_inc(&ref->pin);
                        nvmap_handle_get(h[i]);
                } else {
                        struct nvmap_handle *verify;
                        nvmap_ref_unlock(client);
                        verify = nvmap_validate_get(client, ids[i]);
                        if (verify) {
                                nvmap_warn(client, "%s pinning unreferenced "
                                           "handle %p\n",
                                           current->group_leader->comm, h[i]);
                        } else {
                                ret = -EPERM;
                                nr = i;
                                break;
                        }
                        nvmap_ref_lock(client);
                }
                if (!h[i]->alloc) {
                        ret = -EFAULT;
                        nr = i + 1;
                        break;
                }
        }
        nvmap_ref_unlock(client);

        if (ret)
                goto out;

        ret = mutex_lock_interruptible(&client->share->pin_lock);
        if (WARN_ON(ret))
                goto out;

        ret = wait_pin_array_locked(client, h, nr);

        mutex_unlock(&client->share->pin_lock);

        if (ret) {
                ret = -EINTR;
        } else {
                for (i = 0; i < nr; i++) {
                        if (h[i]->heap_pgalloc && h[i]->pgalloc.dirty)
                                map_iovmm_area(h[i]);
                }
        }

out:
        if (ret) {
                nvmap_ref_lock(client);
                for (i = 0; i < nr; i++) {
                        if (!ids[i])
                                continue;

                        ref = _nvmap_validate_id_locked(client, ids[i]);
                        if (!ref) {
                                nvmap_warn(client, "%s freed handle %p "
                                           "during pinning\n",
                                           current->group_leader->comm,
                                           (void *)ids[i]);
                                continue;
                        }
                        atomic_dec(&ref->pin);
                }
                nvmap_ref_unlock(client);

                for (i = 0; i < nr; i++)
                        if (h[i])
                                nvmap_handle_put(h[i]);
        }

        return ret;
}
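
/*
 * Illustrative sketch, not part of the original file: pinning a set of
 * handle ids with nvmap_pin_ids() and releasing them again with
 * nvmap_unpin_ids(). The helper name example_pin_unpin_ids() is
 * hypothetical.
 */
static int __maybe_unused example_pin_unpin_ids(struct nvmap_client *client,
                                                unsigned int nr,
                                                const unsigned long *ids)
{
        int err;

        /* pins each id and bumps the pin count of its handle_ref */
        err = nvmap_pin_ids(client, nr, ids);
        if (err)
                return err;

        /* ... the buffers are now safe to access from the GPU ... */

        /* drops the pins; wakes pin waiters if IOVMM space was freed */
        nvmap_unpin_ids(client, nr, ids);
        return 0;
}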

static int nvmap_validate_get_pin_array(struct nvmap_client *client,
                                unsigned long *h,
                                unsigned long id_type_mask,
                                unsigned long id_type,
                                int nr,
                                struct nvmap_handle **unique_handles,
                                struct nvmap_handle_ref **unique_handle_refs)
{
        int i;
        int err = 0;
        int count = 0;
        unsigned long last_h = 0;
        struct nvmap_handle_ref *last_ref = NULL;

        nvmap_ref_lock(client);

        for (i = 0; i < nr; i++) {
                struct nvmap_handle_ref *ref;

                if ((h[i] & id_type_mask) != id_type)
                        continue;

                if (last_h == h[i])
                        continue;

                ref = _nvmap_validate_id_locked(client, h[i]);

                if (!ref)
                        nvmap_err(client, "failed to validate id\n");
                else if (!ref->handle)
                        nvmap_err(client, "id had no associated handle\n");
                else if (!ref->handle->alloc)
                        nvmap_err(client, "handle had no allocation\n");

                if (!ref || !ref->handle || !ref->handle->alloc) {
                        err = -EPERM;
                        break;
                }

                last_h = h[i];
                last_ref = ref;
                /* a handle may be referenced multiple times in arr, but
                 * it will only be pinned once; this ensures that the
                 * minimum number of sync-queue slots in the host driver
                 * are dedicated to storing unpin lists, which allows
                 * for greater parallelism between the CPU and graphics
                 * processor */
                if (ref->handle->flags & NVMAP_HANDLE_VISITED)
                        continue;

                ref->handle->flags |= NVMAP_HANDLE_VISITED;

                unique_handles[count] = nvmap_handle_get(ref->handle);

                /* duplicate the handle reference */
                atomic_inc(&ref->dupes);
                nvmap_handle_get(ref->handle);
                unique_handle_refs[count] = ref;

                BUG_ON(!unique_handles[count]);
                count++;
        }

        nvmap_ref_unlock(client);

        if (err) {
                for (i = 0; i < count; i++) {
                        unique_handles[i]->flags &= ~NVMAP_HANDLE_VISITED;
                        /* pin ref */
                        nvmap_handle_put(unique_handles[i]);
                        /* ref count */
                        atomic_dec(&unique_handle_refs[i]->dupes);
                        nvmap_handle_put(unique_handles[i]);
                }
        }

        return err ? err : count;
}

/*
 * @client:       nvmap_client which should be used for validation;
 *                should be owned by the process which is submitting
 *                command buffers
 * @ids:          array of nvmap_handle ids to pin
 * @id_type_mask: bitmask which defines the handle type field in a handle id
 * @id_type:      only handles of this type will be pinned; handles of any
 *                other type are ignored
 * @nr:           number of entries in ids
 * @unique_arr:   list of nvmap_handle objects which were pinned by
 *                nvmap_pin_array. Must be unpinned after use
 * @unique_arr_refs: list of duplicated nvmap_handle_refs corresponding
 *                   to unique_arr. Must be freed after use.
 */
int nvmap_pin_array(struct nvmap_client *client,
                unsigned long *ids,
                unsigned long id_type_mask,
                unsigned long id_type,
                int nr,
                struct nvmap_handle **unique_arr,
                struct nvmap_handle_ref **unique_arr_refs)
{
        int count = 0;
        int ret = 0;
        int i;

        if (mutex_lock_interruptible(&client->share->pin_lock)) {
                nvmap_err(client, "%s interrupted when acquiring pin lock\n",
                          current->group_leader->comm);
                return -EINTR;
        }

        count = nvmap_validate_get_pin_array(client, ids,
                        id_type_mask, id_type, nr,
                        unique_arr, unique_arr_refs);

        if (count < 0) {
                mutex_unlock(&client->share->pin_lock);
                nvmap_warn(client, "failed to validate pin array\n");
                return count;
        }

        for (i = 0; i < count; i++)
                unique_arr[i]->flags &= ~NVMAP_HANDLE_VISITED;

        ret = wait_pin_array_locked(client, unique_arr, count);

        mutex_unlock(&client->share->pin_lock);

        if (WARN_ON(ret)) {
                for (i = 0; i < count; i++) {
                        /* pin ref */
                        nvmap_handle_put(unique_arr[i]);
                        /* remove duplicate */
                        atomic_dec(&unique_arr_refs[i]->dupes);
                        nvmap_handle_put(unique_arr[i]);
                }
                return ret;
        } else {
                for (i = 0; i < count; i++) {
                        if (unique_arr[i]->heap_pgalloc &&
                            unique_arr[i]->pgalloc.dirty)
                                map_iovmm_area(unique_arr[i]);

                        atomic_inc(&unique_arr_refs[i]->pin);
                }
        }
        return count;
}

static phys_addr_t handle_phys(struct nvmap_handle *h)
{
        phys_addr_t addr;

        if (h->heap_pgalloc && h->pgalloc.contig) {
                addr = page_to_phys(h->pgalloc.pages[0]);
        } else if (h->heap_pgalloc) {
                BUG_ON(!h->pgalloc.area);
                addr = h->pgalloc.area->iovm_start;
        } else {
                addr = h->carveout->base;
        }

        return addr;
}

/*
 * Get the physical address of a handle. The handle must already be
 * validated and pinned.
 */
phys_addr_t _nvmap_get_addr_from_id(u32 id)
{
        struct nvmap_handle *h = (struct nvmap_handle *)id;
        return handle_phys(h);
}

/*
 * Pin handle without slow validation step
 */
phys_addr_t _nvmap_pin(struct nvmap_client *client,
                        struct nvmap_handle_ref *ref)
{
        int ret = 0;
        struct nvmap_handle *h;
        phys_addr_t phys;

        if (!ref)
                return -EINVAL;

        h = ref->handle;

        if (WARN_ON(!h))
                return -EINVAL;

        h = nvmap_handle_get(h);

        atomic_inc(&ref->pin);

        if (WARN_ON(mutex_lock_interruptible(&client->share->pin_lock))) {
                ret = -EINTR;
        } else {
                ret = wait_pin_array_locked(client, &h, 1);
                mutex_unlock(&client->share->pin_lock);
        }

        if (ret) {
                atomic_dec(&ref->pin);
                nvmap_handle_put(h);
        } else {
                if (h->heap_pgalloc && h->pgalloc.dirty)
                        map_iovmm_area(h);
                phys = handle_phys(h);
        }

        return ret ?: phys;
}

phys_addr_t nvmap_pin(struct nvmap_client *client,
                        struct nvmap_handle_ref *ref)
{
        struct nvmap_handle *h;

        if (!ref)
                return -EINVAL;
        if (WARN_ON(!ref->handle))
                return -EINVAL;

        nvmap_ref_lock(client);
        ref = _nvmap_validate_id_locked(client, (unsigned long)ref->handle);
        if (ref)
                h = ref->handle;
        nvmap_ref_unlock(client);

        return _nvmap_pin(client, ref);
}

phys_addr_t nvmap_handle_address(struct nvmap_client *c, unsigned long id)
{
        struct nvmap_handle *h;
        phys_addr_t phys;

        h = nvmap_get_handle_id(c, id);
        if (!h)
                return -EPERM;
        mutex_lock(&h->lock);
        phys = handle_phys(h);
        mutex_unlock(&h->lock);
        nvmap_handle_put(h);

        return phys;
}

void nvmap_unpin(struct nvmap_client *client, struct nvmap_handle_ref *ref)
{
        if (!ref)
                return;

        atomic_dec(&ref->pin);
        if (handle_unpin(client, ref->handle, false))
                wake_up(&client->share->pin_wait);
}
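
/*
 * Illustrative sketch, not part of the original file: pinning a single
 * handle_ref to obtain a device-visible address and unpinning it again.
 * The helper name example_pin_single() is hypothetical, and decoding of
 * error values carried in the returned phys_addr_t is elided for brevity.
 */
static phys_addr_t __maybe_unused example_pin_single(
                struct nvmap_client *client, struct nvmap_handle_ref *ref)
{
        phys_addr_t addr;

        /* validating path: re-looks up ref under the client's ref lock */
        addr = nvmap_pin(client, ref);

        /* ... program addr into the engine and submit work ... */

        /* drops the pin; wakes pin waiters if IOVMM space became free */
        nvmap_unpin(client, ref);

        /*
         * _nvmap_pin(client, ref) is the fast-path variant added by this
         * change: it skips the validation walk and trusts the caller's ref.
         */
        return addr;
}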

void nvmap_unpin_handles(struct nvmap_client *client,
                         struct nvmap_handle **h, int nr)
{
        int i;
        int do_wake = 0;

        for (i = 0; i < nr; i++) {
                if (WARN_ON(!h[i]))
                        continue;
                do_wake |= handle_unpin(client, h[i], false);
        }

        if (do_wake)
                wake_up(&client->share->pin_wait);
}
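
/*
 * Illustrative sketch, not part of the original file: pinning a command
 * stream's id array with nvmap_pin_array() and dropping the pins with
 * nvmap_unpin_handles(). The id-type mask/value, the fixed array size and
 * the helper name example_pin_array() are hypothetical; the per-ref
 * bookkeeping on unique_refs (pin/dupe counts) that the host driver does
 * is elided here.
 */
static int __maybe_unused example_pin_array(struct nvmap_client *client,
                                            unsigned long *ids, int nr,
                                            unsigned long id_type_mask,
                                            unsigned long id_type)
{
        struct nvmap_handle *unique[16];
        struct nvmap_handle_ref *unique_refs[16];
        int count;

        if (nr > 16)
                return -EINVAL;

        /* returns the number of unique handles pinned, or a negative error */
        count = nvmap_pin_array(client, ids, id_type_mask, id_type, nr,
                                unique, unique_refs);
        if (count < 0)
                return count;

        /* ... submit work that references the pinned buffers ... */

        /* drops every pin taken above in one call */
        nvmap_unpin_handles(client, unique, count);
        return 0;
}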

void *nvmap_kmap(struct nvmap_handle_ref *ref, unsigned int pagenum)
{
        struct nvmap_handle *h;
        unsigned long paddr;
        unsigned long kaddr;
        pgprot_t prot;
        pte_t **pte;

        BUG_ON(!ref);
        h = nvmap_handle_get(ref->handle);
        if (!h)
                return NULL;

        BUG_ON(pagenum >= h->size >> PAGE_SHIFT);
        prot = nvmap_pgprot(h, pgprot_kernel);
        pte = nvmap_alloc_pte(nvmap_dev, (void **)&kaddr);
        if (!pte)
                goto out;

        if (h->heap_pgalloc)
                paddr = page_to_phys(h->pgalloc.pages[pagenum]);
        else
                paddr = h->carveout->base + pagenum * PAGE_SIZE;

        set_pte_at(&init_mm, kaddr, *pte,
                                pfn_pte(__phys_to_pfn(paddr), prot));
        flush_tlb_kernel_page(kaddr);
        return (void *)kaddr;
out:
        nvmap_handle_put(ref->handle);
        return NULL;
}

void nvmap_kunmap(struct nvmap_handle_ref *ref, unsigned int pagenum,
                  void *addr)
{
        struct nvmap_handle *h;
        unsigned long paddr;
        pte_t **pte;

        BUG_ON(!addr || !ref);
        h = ref->handle;

        if (h->heap_pgalloc)
                paddr = page_to_phys(h->pgalloc.pages[pagenum]);
        else
                paddr = h->carveout->base + pagenum * PAGE_SIZE;

        if (h->flags != NVMAP_HANDLE_UNCACHEABLE &&
            h->flags != NVMAP_HANDLE_WRITE_COMBINE) {
                dmac_flush_range(addr, addr + PAGE_SIZE);
                outer_flush_range(paddr, paddr + PAGE_SIZE);
        }

        pte = nvmap_vaddr_to_pte(nvmap_dev, (unsigned long)addr);
        nvmap_free_pte(nvmap_dev, pte);
        nvmap_handle_put(h);
}
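
/*
 * Illustrative sketch, not part of the original file: temporarily mapping a
 * single page of a handle with nvmap_kmap(), touching it from the CPU and
 * tearing the mapping down with nvmap_kunmap(). The helper name
 * example_zero_page() is hypothetical.
 */
static int __maybe_unused example_zero_page(struct nvmap_handle_ref *ref,
                                            unsigned int pagenum)
{
        void *va;

        va = nvmap_kmap(ref, pagenum);
        if (!va)
                return -ENOMEM;

        memset(va, 0, PAGE_SIZE);

        /* flushes CPU caches for cacheable handles and frees the pte */
        nvmap_kunmap(ref, pagenum, va);
        return 0;
}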

void *nvmap_mmap(struct nvmap_handle_ref *ref)
{
        struct nvmap_handle *h;
        pgprot_t prot;
        unsigned long adj_size;
        unsigned long offs;
        struct vm_struct *v;
        void *p;

        h = nvmap_handle_get(ref->handle);
        if (!h)
                return NULL;

        prot = nvmap_pgprot(h, pgprot_kernel);

        if (h->heap_pgalloc)
                return vm_map_ram(h->pgalloc.pages, h->size >> PAGE_SHIFT,
                                  -1, prot);

        /* carveout - explicitly map the pfns into a vmalloc area */

        nvmap_usecount_inc(h);

        adj_size = h->carveout->base & ~PAGE_MASK;
        adj_size += h->size;
        adj_size = PAGE_ALIGN(adj_size);

        v = alloc_vm_area(adj_size, NULL);
        if (!v) {
                nvmap_usecount_dec(h);
                nvmap_handle_put(h);
                return NULL;
        }

        p = v->addr + (h->carveout->base & ~PAGE_MASK);

        for (offs = 0; offs < adj_size; offs += PAGE_SIZE) {
                unsigned long addr = (unsigned long) v->addr + offs;
                unsigned int pfn;
                pgd_t *pgd;
                pud_t *pud;
                pmd_t *pmd;
                pte_t *pte;

                pfn = __phys_to_pfn(h->carveout->base + offs);
                pgd = pgd_offset_k(addr);
                pud = pud_alloc(&init_mm, pgd, addr);
                if (!pud)
                        break;
                pmd = pmd_alloc(&init_mm, pud, addr);
                if (!pmd)
                        break;
                pte = pte_alloc_kernel(pmd, addr);
                if (!pte)
                        break;
                set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
                flush_tlb_kernel_page(addr);
        }

        if (offs != adj_size) {
                free_vm_area(v);
                nvmap_usecount_dec(h);
                nvmap_handle_put(h);
                return NULL;
        }

        /* leave the handle ref count incremented by 1, so that
         * the handle will not be freed while the kernel mapping exists.
         * nvmap_handle_put will be called by unmapping this address */
        return p;
}

void nvmap_munmap(struct nvmap_handle_ref *ref, void *addr)
{
        struct nvmap_handle *h;

        if (!ref)
                return;

        h = ref->handle;

        if (h->heap_pgalloc) {
                vm_unmap_ram(addr, h->size >> PAGE_SHIFT);
        } else {
                struct vm_struct *vm;
                addr -= (h->carveout->base & ~PAGE_MASK);
                vm = remove_vm_area(addr);
                BUG_ON(!vm);
                kfree(vm);
                nvmap_usecount_dec(h);
        }
        nvmap_handle_put(h);
}
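
/*
 * Illustrative sketch, not part of the original file: creating a long-lived
 * kernel mapping of a whole handle with nvmap_mmap() and releasing it with
 * nvmap_munmap(). The helper name example_map_handle() is hypothetical.
 */
static void __maybe_unused example_map_handle(struct nvmap_handle_ref *ref)
{
        void *va;

        va = nvmap_mmap(ref);
        if (!va)
                return;

        /* the handle cannot be freed while this mapping exists, because
         * nvmap_mmap() leaves its reference count incremented */

        /* ... CPU accesses through va ... */

        nvmap_munmap(ref, va);
}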

struct nvmap_handle_ref *nvmap_alloc(struct nvmap_client *client, size_t size,
                                     size_t align, unsigned int flags,
                                     unsigned int heap_mask)
{
        const unsigned int default_heap = (NVMAP_HEAP_SYSMEM |
                                           NVMAP_HEAP_CARVEOUT_GENERIC);
        struct nvmap_handle_ref *r = NULL;
        int err;

        if (heap_mask == 0)
                heap_mask = default_heap;

        r = nvmap_create_handle(client, size);
        if (IS_ERR(r))
                return r;

        err = nvmap_alloc_handle_id(client, nvmap_ref_to_id(r),
                                    heap_mask, align, flags);

        if (err) {
                nvmap_free_handle_id(client, nvmap_ref_to_id(r));
                return ERR_PTR(err);
        }

        return r;
}

/* allocates memory with the specified iovm_start address. */
struct nvmap_handle_ref *nvmap_alloc_iovm(struct nvmap_client *client,
        size_t size, size_t align, unsigned int flags, unsigned int iovm_start)
{
        int err;
        struct nvmap_handle *h;
        struct nvmap_handle_ref *r;
        const unsigned int default_heap = NVMAP_HEAP_IOVMM;

        /* the size needs to be larger than one page;
         * otherwise the heap preference would change to the system heap.
         */
        if (size <= PAGE_SIZE)
                size = PAGE_SIZE << 1;
        r = nvmap_create_handle(client, size);
        if (IS_ERR_OR_NULL(r))
                return r;

        h = r->handle;
        h->pgalloc.iovm_addr = iovm_start;
        err = nvmap_alloc_handle_id(client, nvmap_ref_to_id(r),
                        default_heap, align, flags);
        if (err)
                goto fail;

        err = mutex_lock_interruptible(&client->share->pin_lock);
        if (WARN_ON(err))
                goto fail;

        nvmap_mru_lock(client->share);
        err = pin_locked(client, h);
        nvmap_mru_unlock(client->share);

        mutex_unlock(&client->share->pin_lock);
        if (err)
                goto fail;
        return r;

fail:
        nvmap_free_handle_id(client, nvmap_ref_to_id(r));
        return ERR_PTR(err);
}

void nvmap_free_iovm(struct nvmap_client *client, struct nvmap_handle_ref *r)
{
        unsigned long ref_id = nvmap_ref_to_id(r);

        nvmap_unpin_ids(client, 1, &ref_id);
        nvmap_free_handle_id(client, ref_id);
}
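
/*
 * Illustrative sketch, not part of the original file: allocating IOVM memory
 * at a fixed device address with nvmap_alloc_iovm() and releasing it with
 * nvmap_free_iovm(). The size, alignment, flags and the helper name
 * example_alloc_at() are hypothetical.
 */
static int __maybe_unused example_alloc_at(struct nvmap_client *client,
                                           unsigned int iovm_start)
{
        struct nvmap_handle_ref *r;

        /* the buffer comes back already pinned at iovm_start */
        r = nvmap_alloc_iovm(client, 4 * PAGE_SIZE, PAGE_SIZE,
                             NVMAP_HANDLE_WRITE_COMBINE, iovm_start);
        if (IS_ERR_OR_NULL(r))
                return r ? PTR_ERR(r) : -ENOMEM;

        /* ... use the buffer ... */

        /* unpins and frees the handle */
        nvmap_free_iovm(client, r);
        return 0;
}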

void nvmap_free(struct nvmap_client *client, struct nvmap_handle_ref *r)
{
        if (!r)
                return;

        nvmap_free_handle_id(client, nvmap_ref_to_id(r));
}
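
/*
 * Illustrative sketch, not part of the original file: the basic allocation
 * lifecycle built from the functions above. The size, alignment, flags and
 * the helper name example_alloc_buffer() are hypothetical.
 */
static int __maybe_unused example_alloc_buffer(struct nvmap_client *client)
{
        struct nvmap_handle_ref *r;
        void *va;

        /* a zero heap_mask falls back to SYSMEM | CARVEOUT_GENERIC */
        r = nvmap_alloc(client, 16 * PAGE_SIZE, PAGE_SIZE,
                        NVMAP_HANDLE_WRITE_COMBINE, 0);
        if (IS_ERR(r))
                return PTR_ERR(r);

        va = nvmap_mmap(r);
        if (va) {
                memset(va, 0, 16 * PAGE_SIZE);
                nvmap_munmap(r, va);
        }

        nvmap_free(client, r);
        return 0;
}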