/*
 * drivers/video/tegra/nvmap/nvmap.c
 *
 * Memory manager for Tegra GPU
 *
 * Copyright (c) 2009-2011, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/rbtree.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/slab.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include <mach/iovmm.h>
#include <mach/nvmap.h>

#include "nvmap.h"
#include "nvmap_mru.h"

/* private nvmap_handle flag for pinning duplicate detection */
#define NVMAP_HANDLE_VISITED (0x1ul << 31)

/* map the backing pages for a heap_pgalloc handle into its IOVMM area */
static void map_iovmm_area(struct nvmap_handle *h)
{
        tegra_iovmm_addr_t va;
        unsigned long i;

        BUG_ON(!h->heap_pgalloc || !h->pgalloc.area);
        BUG_ON(h->size & ~PAGE_MASK);
        WARN_ON(!h->pgalloc.dirty);

        for (va = h->pgalloc.area->iovm_start, i = 0;
             va < (h->pgalloc.area->iovm_start + h->size);
             i++, va += PAGE_SIZE) {
                BUG_ON(!pfn_valid(page_to_pfn(h->pgalloc.pages[i])));
                tegra_iovmm_vm_insert_pfn(h->pgalloc.area, va,
                                          page_to_pfn(h->pgalloc.pages[i]));
        }
        h->pgalloc.dirty = false;
}

/* must be called inside nvmap_pin_lock, to ensure that an entire stream
 * of pins will complete without racing with a second stream. handle should
 * have nvmap_handle_get (or nvmap_validate_get) called before calling
 * this function. */
static int pin_locked(struct nvmap_client *client, struct nvmap_handle *h)
{
        struct tegra_iovmm_area *area;
        BUG_ON(!h->alloc);

        if (atomic_inc_return(&h->pin) == 1) {
                if (h->heap_pgalloc && !h->pgalloc.contig) {
                        area = nvmap_handle_iovmm(client, h);
                        if (!area) {
                                /* no race here, inside the pin mutex */
                                atomic_dec(&h->pin);
                                return -ENOMEM;
                        }
                        if (area != h->pgalloc.area)
                                h->pgalloc.dirty = true;
                        h->pgalloc.area = area;
                }
        }
        return 0;
}

static int wait_pin_locked(struct nvmap_client *client, struct nvmap_handle *h)
{
        int ret = 0;

        ret = pin_locked(client, h);

        if (ret) {
                ret = wait_event_interruptible(client->share->pin_wait,
                                               !pin_locked(client, h));
        }

        return ret ? -EINTR : 0;
}
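
/*
 * Illustrative sketch only (not compiled into the driver): the calling
 * protocol the comment above pin_locked() describes, i.e. take a reference
 * on the handle first, then hold share->pin_lock around wait_pin_locked().
 * The nvmap_client pointer is assumed to be supplied by the caller; this
 * mirrors what nvmap_pin() later in this file does.
 */
#if 0
static int example_pin_one_handle(struct nvmap_client *client,
                                  struct nvmap_handle *h)
{
        int err;

        h = nvmap_handle_get(h);        /* reference must be held first */
        if (!h)
                return -EINVAL;

        if (mutex_lock_interruptible(&client->share->pin_lock)) {
                nvmap_handle_put(h);
                return -EINTR;
        }
        err = wait_pin_locked(client, h);
        mutex_unlock(&client->share->pin_lock);

        if (err)
                nvmap_handle_put(h);
        else if (h->heap_pgalloc && h->pgalloc.dirty)
                map_iovmm_area(h);      /* newly obtained IOVMM area needs mapping */

        return err;
}
#endif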

/* doesn't need to be called inside nvmap_pin_lock, since this will only
 * expand the available VM area */
static int handle_unpin(struct nvmap_client *client, struct nvmap_handle *h)
{
        int ret = 0;

        nvmap_mru_lock(client->share);

        if (atomic_read(&h->pin) == 0) {
                nvmap_err(client, "%s unpinning unpinned handle %p\n",
                          current->group_leader->comm, h);
                nvmap_mru_unlock(client->share);
                return 0;
        }

        BUG_ON(!h->alloc);

        if (!atomic_dec_return(&h->pin)) {
                if (h->heap_pgalloc && h->pgalloc.area) {
                        /* if a secure handle is clean (i.e., mapped into
                         * IOVMM), it needs to be zapped on unpin. */
                        if (h->secure && !h->pgalloc.dirty) {
                                tegra_iovmm_zap_vm(h->pgalloc.area);
                                h->pgalloc.dirty = true;
                        }
                        nvmap_mru_insert_locked(client->share, h);
                        ret = 1;
                }
        }

        nvmap_mru_unlock(client->share);

        nvmap_handle_put(h);
        return ret;
}

static int handle_unpin_noref(struct nvmap_client *client, unsigned long id)
{
        struct nvmap_handle *h;
        int w;

        h = nvmap_validate_get(client, id);
        if (unlikely(!h)) {
                nvmap_err(client, "%s attempting to unpin invalid handle %p\n",
                          current->group_leader->comm, (void *)id);
                return 0;
        }

        nvmap_err(client, "%s unpinning unreferenced handle %p\n",
                  current->group_leader->comm, h);
        WARN_ON(1);

        w = handle_unpin(client, h);
        nvmap_handle_put(h);
        return w;
}

void nvmap_unpin_ids(struct nvmap_client *client,
                     unsigned int nr, const unsigned long *ids)
{
        unsigned int i;
        int do_wake = 0;

        for (i = 0; i < nr; i++) {
                struct nvmap_handle_ref *ref;

                if (!ids[i])
                        continue;

                nvmap_ref_lock(client);
                ref = _nvmap_validate_id_locked(client, ids[i]);
                if (ref) {
                        struct nvmap_handle *h = ref->handle;
                        int e = atomic_add_unless(&ref->pin, -1, 0);

                        nvmap_ref_unlock(client);

                        if (!e) {
                                nvmap_err(client, "%s unpinning unpinned "
                                          "handle %08lx\n",
                                          current->group_leader->comm, ids[i]);
                        } else {
                                do_wake |= handle_unpin(client, h);
                        }
                } else {
                        nvmap_ref_unlock(client);
                        if (client->super)
                                do_wake |= handle_unpin_noref(client, ids[i]);
                        else
                                nvmap_err(client, "%s unpinning invalid "
                                          "handle %08lx\n",
                                          current->group_leader->comm, ids[i]);
                }
        }

        if (do_wake)
                wake_up(&client->share->pin_wait);
}

/* pins a list of handle_ref objects; same conditions apply as to
 * _nvmap_handle_pin, but also bumps the pin count of each handle_ref. */
int nvmap_pin_ids(struct nvmap_client *client,
                  unsigned int nr, const unsigned long *ids)
{
        int ret = 0;
        int cnt = 0;
        unsigned int i;
        struct nvmap_handle **h = (struct nvmap_handle **)ids;
        struct nvmap_handle_ref *ref;

        /* to optimize for the common case (client provided valid handle
         * references and the pin succeeds), increment the handle_ref pin
         * count during validation. in error cases, the tree will need to
         * be re-walked, since the handle_ref is discarded so that an
         * allocation isn't required. if a handle_ref is not found,
         * locally validate that the caller has permission to pin the handle;
         * handle_refs are not created in this case, so it is possible that
         * if the caller crashes after pinning a global handle, the handle
         * will be permanently leaked. */
        nvmap_ref_lock(client);
        for (i = 0; i < nr && !ret; i++) {
                ref = _nvmap_validate_id_locked(client, ids[i]);
                if (ref) {
                        atomic_inc(&ref->pin);
                        nvmap_handle_get(h[i]);
                } else {
                        struct nvmap_handle *verify;
                        nvmap_ref_unlock(client);
                        verify = nvmap_validate_get(client, ids[i]);
                        if (verify)
                                nvmap_warn(client, "%s pinning unreferenced "
                                           "handle %p\n",
                                           current->group_leader->comm, h[i]);
                        else
                                ret = -EPERM;
                        nvmap_ref_lock(client);
                }
        }
        nvmap_ref_unlock(client);

        nr = i;

        if (ret)
                goto out;

        ret = mutex_lock_interruptible(&client->share->pin_lock);
        if (WARN_ON(ret))
                goto out;

        for (cnt = 0; cnt < nr && !ret; cnt++) {
                ret = wait_pin_locked(client, h[cnt]);
        }
        mutex_unlock(&client->share->pin_lock);

        if (ret) {
                int do_wake = 0;

                for (i = 0; i < cnt; i++)
                        do_wake |= handle_unpin(client, h[i]);

                if (do_wake)
                        wake_up(&client->share->pin_wait);

                ret = -EINTR;
        } else {
                for (i = 0; i < nr; i++) {
                        if (h[i]->heap_pgalloc && h[i]->pgalloc.dirty)
                                map_iovmm_area(h[i]);
                }
        }

out:
        if (ret) {
                nvmap_ref_lock(client);
                for (i = 0; i < nr; i++) {
                        ref = _nvmap_validate_id_locked(client, ids[i]);
                        if (!ref) {
                                nvmap_warn(client, "%s freed handle %p "
                                           "during pinning\n",
                                           current->group_leader->comm,
                                           (void *)ids[i]);
                                continue;
                        }
                        atomic_dec(&ref->pin);
                }
                nvmap_ref_unlock(client);

                for (i = cnt; i < nr; i++)
                        nvmap_handle_put(h[i]);
        }

        return ret;
}
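
/*
 * Illustrative sketch only (not compiled into the driver): how a caller
 * might pin a set of handle ids with nvmap_pin_ids() and balance it later
 * with nvmap_unpin_ids(). The client and the id values are assumptions
 * supplied by the caller (e.g. copied in from an ioctl); they are not
 * defined by this file.
 */
#if 0
static int example_pin_id_list(struct nvmap_client *client,
                               const unsigned long *ids, unsigned int nr)
{
        int err;

        err = nvmap_pin_ids(client, nr, ids);
        if (err)
                return err;     /* nothing is left pinned on error */

        /* ... hand the pinned buffers to hardware and wait for it ... */

        nvmap_unpin_ids(client, nr, ids);
        return 0;
}
#endif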

static phys_addr_t handle_phys(struct nvmap_handle *h)
{
        phys_addr_t addr;

        if (h->heap_pgalloc && h->pgalloc.contig) {
                addr = page_to_phys(h->pgalloc.pages[0]);
        } else if (h->heap_pgalloc) {
                BUG_ON(!h->pgalloc.area);
                addr = h->pgalloc.area->iovm_start;
        } else {
                addr = h->carveout->base;
        }

        return addr;
}

/* stores the physical address (+offset) of each handle relocation entry
 * into its output location. see nvmap_pin_array for more details.
 *
 * each entry in arr (i.e., each relocation request) specifies two handles:
 * the handle to pin (pin), and the handle where the address of pin should be
 * written (patch). in pseudocode, this loop basically looks like:
 *
 * for (i = 0; i < nr; i++) {
 *     (pin, pin_offset, patch, patch_offset) = arr[i];
 *     patch[patch_offset] = address_of(pin) + pin_offset;
 * }
 */
static int nvmap_reloc_pin_array(struct nvmap_client *client,
                                 const struct nvmap_pinarray_elem *arr,
                                 int nr, struct nvmap_handle *gather)
{
        struct nvmap_handle *last_patch = NULL;
        unsigned int last_pfn = 0;
        pte_t **pte;
        void *addr;
        int i;

        pte = nvmap_alloc_pte(client->dev, &addr);
        if (IS_ERR(pte))
                return PTR_ERR(pte);

        for (i = 0; i < nr; i++) {
                struct nvmap_handle *patch;
                struct nvmap_handle *pin;
                phys_addr_t reloc_addr;
                phys_addr_t phys;
                unsigned int pfn;

                /* all of the handles are validated and referenced (get'ted)
                 * prior to calling this function, so the cast is safe here */
                pin = (struct nvmap_handle *)arr[i].pin_mem;

                if (arr[i].patch_mem == (unsigned long)last_patch) {
                        patch = last_patch;
                } else if (arr[i].patch_mem == (unsigned long)gather) {
                        patch = gather;
                } else {
                        if (last_patch)
                                nvmap_handle_put(last_patch);

                        patch = nvmap_get_handle_id(client, arr[i].patch_mem);
                        if (!patch) {
                                nvmap_free_pte(client->dev, pte);
                                return -EPERM;
                        }
                        last_patch = patch;
                }

                if (patch->heap_pgalloc) {
                        unsigned int page = arr[i].patch_offset >> PAGE_SHIFT;
                        phys = page_to_phys(patch->pgalloc.pages[page]);
                        phys += (arr[i].patch_offset & ~PAGE_MASK);
                } else {
                        phys = patch->carveout->base + arr[i].patch_offset;
                }

                pfn = __phys_to_pfn(phys);
                if (pfn != last_pfn) {
                        pgprot_t prot = nvmap_pgprot(patch, pgprot_kernel);
                        phys_addr_t kaddr = (phys_addr_t)addr;
                        set_pte_at(&init_mm, kaddr, *pte, pfn_pte(pfn, prot));
                        flush_tlb_kernel_page(kaddr);
                        last_pfn = pfn;
                }

                reloc_addr = handle_phys(pin) + arr[i].pin_offset;
                __raw_writel(reloc_addr, addr + (phys & ~PAGE_MASK));
        }

        nvmap_free_pte(client->dev, pte);

        if (last_patch)
                nvmap_handle_put(last_patch);

        wmb();

        return 0;
}
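
/*
 * Illustrative sketch only (not compiled into the driver): filling in one
 * relocation entry for the loop in nvmap_reloc_pin_array() above. Each
 * nvmap_pinarray_elem says "write the address of handle 'pin' (+pin_offset)
 * into handle 'patch' at patch_offset", as in the pseudocode comment before
 * the function. The handle ids are assumed to be valid ids owned by the
 * caller, and the exact field types are whatever <mach/nvmap.h> declares.
 */
#if 0
static void example_fill_reloc(struct nvmap_pinarray_elem *elem,
                               unsigned long cmdbuf_id,
                               unsigned long cmdbuf_offset,
                               unsigned long target_id,
                               unsigned long target_offset)
{
        elem->patch_mem = cmdbuf_id;      /* handle that gets patched */
        elem->patch_offset = cmdbuf_offset;
        elem->pin_mem = target_id;        /* handle whose address is written */
        elem->pin_offset = target_offset;
}
#endif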

static int nvmap_validate_get_pin_array(struct nvmap_client *client,
                                        const struct nvmap_pinarray_elem *arr,
                                        int nr, struct nvmap_handle **h)
{
        int i;
        int ret = 0;
        int count = 0;

        nvmap_ref_lock(client);

        for (i = 0; i < nr; i++) {
                struct nvmap_handle_ref *ref;

                if (need_resched()) {
                        nvmap_ref_unlock(client);
                        schedule();
                        nvmap_ref_lock(client);
                }

                ref = _nvmap_validate_id_locked(client, arr[i].pin_mem);

                if (!ref)
                        nvmap_warn(client, "failed to validate id\n");
                else if (!ref->handle)
                        nvmap_warn(client, "id had no associated handle\n");
                else if (!ref->handle->alloc)
                        nvmap_warn(client, "handle had no allocation\n");

                if (!ref || !ref->handle || !ref->handle->alloc) {
                        ret = -EPERM;
                        break;
                }

                /* a handle may be referenced multiple times in arr, but
                 * it will only be pinned once; this ensures that the
                 * minimum number of sync-queue slots in the host driver
                 * are dedicated to storing unpin lists, which allows
                 * for greater parallelism between the CPU and graphics
                 * processor */
                if (ref->handle->flags & NVMAP_HANDLE_VISITED)
                        continue;

                ref->handle->flags |= NVMAP_HANDLE_VISITED;

                h[count] = nvmap_handle_get(ref->handle);
                BUG_ON(!h[count]);
                count++;
        }

        nvmap_ref_unlock(client);

        if (ret) {
                for (i = 0; i < count; i++) {
                        h[i]->flags &= ~NVMAP_HANDLE_VISITED;
                        nvmap_handle_put(h[i]);
                }
        }

        return ret ?: count;
}

/* the typical mechanism host1x clients use to drive the Tegra graphics
 * processor is to build a command buffer which contains relocatable
 * memory handle commands, and rely on the kernel to convert these in-place
 * to addresses which are understood by the GPU hardware.
 *
 * this is implemented by having clients provide a sideband array
 * of relocatable handles (+ offsets) and the location in the command
 * buffer handle to patch with the GPU address when the client submits
 * its command buffer to the host1x driver.
 *
 * the host driver also uses this relocation mechanism internally to
 * relocate the client's (unpinned) command buffers into host-addressable
 * memory.
 *
 * @client: nvmap_client which should be used for validation; should be
 *          owned by the process which is submitting command buffers
 * @gather: special handle for relocated command buffer outputs used
 *          internally by the host driver. if this handle is encountered
 *          as an output handle in the relocation array, it is assumed
 *          to be a known-good output and is not validated.
 * @arr:    array of ((relocatable handle, offset), (output handle, offset))
 *          tuples.
 * @nr:     number of entries in arr
 * @unique_arr: list of nvmap_handle objects which were pinned by
 *              nvmap_pin_array. must be unpinned by the caller after the
 *              command buffers referenced in gather have completed.
 */
int nvmap_pin_array(struct nvmap_client *client, struct nvmap_handle *gather,
                    const struct nvmap_pinarray_elem *arr, int nr,
                    struct nvmap_handle **unique_arr)
{
        int count = 0;
        int pinned = 0;
        int ret = 0;
        int i;

        if (mutex_lock_interruptible(&client->share->pin_lock)) {
                nvmap_warn(client, "%s interrupted when acquiring pin lock\n",
                           current->group_leader->comm);
                return -EINTR;
        }

        count = nvmap_validate_get_pin_array(client, arr, nr, unique_arr);
        if (count < 0) {
                mutex_unlock(&client->share->pin_lock);
                nvmap_warn(client, "failed to validate pin array\n");
                return count;
        }

        for (i = 0; i < count; i++)
                unique_arr[i]->flags &= ~NVMAP_HANDLE_VISITED;

        for (pinned = 0; pinned < count && !ret; pinned++)
                ret = wait_pin_locked(client, unique_arr[pinned]);

        mutex_unlock(&client->share->pin_lock);

        if (!ret)
                ret = nvmap_reloc_pin_array(client, arr, nr, gather);

        if (WARN_ON(ret)) {
                int do_wake = 0;

                for (i = pinned; i < count; i++)
                        nvmap_handle_put(unique_arr[i]);

                for (i = 0; i < pinned; i++)
                        do_wake |= handle_unpin(client, unique_arr[i]);

                if (do_wake)
                        wake_up(&client->share->pin_wait);

                return ret;
        } else {
                for (i = 0; i < count; i++) {
                        if (unique_arr[i]->heap_pgalloc &&
                            unique_arr[i]->pgalloc.dirty)
                                map_iovmm_area(unique_arr[i]);
                }
        }

        return count;
}
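
/*
 * Illustrative sketch only (not compiled into the driver): how the host1x
 * side might drive nvmap_pin_array(). 'gather' is the host-owned command
 * buffer handle, 'relocs' the sideband relocation array described above,
 * and 'unique' must have room for up to 'nr' handles. All of these are
 * assumed to be set up by the caller.
 */
#if 0
static int example_submit(struct nvmap_client *client,
                          struct nvmap_handle *gather,
                          const struct nvmap_pinarray_elem *relocs, int nr,
                          struct nvmap_handle **unique)
{
        int count;

        count = nvmap_pin_array(client, gather, relocs, nr, unique);
        if (count < 0)
                return count;   /* nothing is left pinned on failure */

        /* ... submit the patched command buffer and wait for it ... */

        /* unpin exactly the handles nvmap_pin_array() reported */
        nvmap_unpin_handles(client, unique, count);
        return 0;
}
#endif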

phys_addr_t nvmap_pin(struct nvmap_client *client,
                        struct nvmap_handle_ref *ref)
{
        struct nvmap_handle *h;
        phys_addr_t phys;
        int ret = 0;

        h = nvmap_handle_get(ref->handle);
        if (WARN_ON(!h))
                return -EINVAL;

        atomic_inc(&ref->pin);

        if (WARN_ON(mutex_lock_interruptible(&client->share->pin_lock))) {
                ret = -EINTR;
        } else {
                ret = wait_pin_locked(client, h);
                mutex_unlock(&client->share->pin_lock);
        }

        if (ret) {
                atomic_dec(&ref->pin);
                nvmap_handle_put(h);
        } else {
                if (h->heap_pgalloc && h->pgalloc.dirty)
                        map_iovmm_area(h);
                phys = handle_phys(h);
        }

        return ret ?: phys;
}

phys_addr_t nvmap_handle_address(struct nvmap_client *c, unsigned long id)
{
        struct nvmap_handle *h;
        phys_addr_t phys;

        h = nvmap_get_handle_id(c, id);
        if (!h)
                return -EPERM;
        mutex_lock(&h->lock);
        phys = handle_phys(h);
        mutex_unlock(&h->lock);
        nvmap_handle_put(h);

        return phys;
}

void nvmap_unpin(struct nvmap_client *client, struct nvmap_handle_ref *ref)
{
        atomic_dec(&ref->pin);
        if (handle_unpin(client, ref->handle))
                wake_up(&client->share->pin_wait);
}

void nvmap_unpin_handles(struct nvmap_client *client,
                         struct nvmap_handle **h, int nr)
{
        int i;
        int do_wake = 0;

        for (i = 0; i < nr; i++) {
                if (WARN_ON(!h[i]))
                        continue;
                do_wake |= handle_unpin(client, h[i]);
        }

        if (do_wake)
                wake_up(&client->share->pin_wait);
}

void *nvmap_mmap(struct nvmap_handle_ref *ref)
{
        struct nvmap_handle *h;
        pgprot_t prot;
        unsigned long adj_size;
        unsigned long offs;
        struct vm_struct *v;
        void *p;

        h = nvmap_handle_get(ref->handle);
        if (!h)
                return NULL;

        prot = nvmap_pgprot(h, pgprot_kernel);

        if (h->heap_pgalloc)
                return vm_map_ram(h->pgalloc.pages, h->size >> PAGE_SHIFT,
                                  -1, prot);

        /* carveout - explicitly map the pfns into a vmalloc area */

        nvmap_usecount_inc(h);

        adj_size = h->carveout->base & ~PAGE_MASK;
        adj_size += h->size;
        adj_size = PAGE_ALIGN(adj_size);

        v = alloc_vm_area(adj_size, NULL);
        if (!v) {
                nvmap_usecount_dec(h);
                nvmap_handle_put(h);
                return NULL;
        }

        p = v->addr + (h->carveout->base & ~PAGE_MASK);

        for (offs = 0; offs < adj_size; offs += PAGE_SIZE) {
                unsigned long addr = (unsigned long) v->addr + offs;
                unsigned int pfn;
                pgd_t *pgd;
                pud_t *pud;
                pmd_t *pmd;
                pte_t *pte;

                pfn = __phys_to_pfn(h->carveout->base + offs);
                pgd = pgd_offset_k(addr);
                pud = pud_alloc(&init_mm, pgd, addr);
                if (!pud)
                        break;
                pmd = pmd_alloc(&init_mm, pud, addr);
                if (!pmd)
                        break;
                pte = pte_alloc_kernel(pmd, addr);
                if (!pte)
                        break;
                set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
                flush_tlb_kernel_page(addr);
        }

        if (offs != adj_size) {
                free_vm_area(v);
                nvmap_usecount_dec(h);
                nvmap_handle_put(h);
                return NULL;
        }

        /* leave the handle ref count incremented by 1, so that
         * the handle will not be freed while the kernel mapping exists.
         * nvmap_handle_put will be called when this address is unmapped */
        return p;
}

void nvmap_munmap(struct nvmap_handle_ref *ref, void *addr)
{
        struct nvmap_handle *h;

        if (!ref)
                return;

        h = ref->handle;

        if (h->heap_pgalloc) {
                vm_unmap_ram(addr, h->size >> PAGE_SHIFT);
        } else {
                struct vm_struct *vm;
                addr -= (h->carveout->base & ~PAGE_MASK);
                vm = remove_vm_area(addr);
                BUG_ON(!vm);
                kfree(vm);
                nvmap_usecount_dec(h);
        }
        nvmap_handle_put(h);
}
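
/*
 * Illustrative sketch only (not compiled into the driver): a kernel-side
 * user of nvmap_mmap()/nvmap_munmap(). The handle_ref and fill length are
 * assumptions supplied by the caller (e.g. from a prior nvmap_alloc()).
 * Note that nvmap_mmap() keeps a handle reference held until
 * nvmap_munmap() is called on the returned address.
 */
#if 0
static int example_cpu_fill(struct nvmap_handle_ref *ref, u8 pattern,
                            size_t len)
{
        void *va = nvmap_mmap(ref);

        if (!va)
                return -ENOMEM;

        memset(va, pattern, len);       /* CPU access through the mapping */

        nvmap_munmap(ref, va);
        return 0;
}
#endif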

struct nvmap_handle_ref *nvmap_alloc(struct nvmap_client *client, size_t size,
                                     size_t align, unsigned int flags)
{
        const unsigned int default_heap = (NVMAP_HEAP_SYSMEM |
                                           NVMAP_HEAP_CARVEOUT_GENERIC);
        struct nvmap_handle_ref *r = NULL;
        int err;

        r = nvmap_create_handle(client, size);
        if (IS_ERR(r))
                return r;

        err = nvmap_alloc_handle_id(client, nvmap_ref_to_id(r),
                                    default_heap, align, flags);

        if (err) {
                nvmap_free_handle_id(client, nvmap_ref_to_id(r));
                return ERR_PTR(err);
        }

        return r;
}

/* allocates memory with the specified iovm_start address. */
struct nvmap_handle_ref *nvmap_alloc_iovm(struct nvmap_client *client,
        size_t size, size_t align, unsigned int flags, unsigned int iovm_start)
{
        int err;
        struct nvmap_handle *h;
        struct nvmap_handle_ref *r;
        const unsigned int default_heap = NVMAP_HEAP_IOVMM;

        /* size needs to be more than one page;
         * otherwise the heap preference would change to the system heap.
         */
        if (size <= PAGE_SIZE)
                size = PAGE_SIZE << 1;
        r = nvmap_create_handle(client, size);
        if (IS_ERR_OR_NULL(r))
                return r;

        h = r->handle;
        h->pgalloc.iovm_addr = iovm_start;
        err = nvmap_alloc_handle_id(client, nvmap_ref_to_id(r),
                        default_heap, align, flags);
        if (err)
                goto fail;

        err = mutex_lock_interruptible(&client->share->pin_lock);
        if (WARN_ON(err))
                goto fail;
        err = pin_locked(client, h);
        mutex_unlock(&client->share->pin_lock);
        if (err)
                goto fail;
        return r;

fail:
        nvmap_free_handle_id(client, nvmap_ref_to_id(r));
        return ERR_PTR(err);
}

void nvmap_free_iovm(struct nvmap_client *client, struct nvmap_handle_ref *r)
{
        unsigned long ref_id = nvmap_ref_to_id(r);

        nvmap_unpin_ids(client, 1, &ref_id);
        nvmap_free_handle_id(client, ref_id);
}
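
/*
 * Illustrative sketch only (not compiled into the driver): allocating an
 * IOVMM buffer at a fixed device address with nvmap_alloc_iovm() and
 * releasing it with nvmap_free_iovm(). The client, size, cache flags and
 * device address are assumptions supplied by the caller; the returned
 * handle_ref comes back already pinned.
 */
#if 0
static struct nvmap_handle_ref *example_alloc_at(struct nvmap_client *client,
                                                 size_t size,
                                                 unsigned int flags,
                                                 unsigned int iovm_start)
{
        struct nvmap_handle_ref *r;

        r = nvmap_alloc_iovm(client, size, PAGE_SIZE, flags, iovm_start);
        if (IS_ERR_OR_NULL(r))
                return r;

        /* ... program the device with iovm_start and use the buffer ... */

        nvmap_free_iovm(client, r);     /* unpins and frees the handle */
        return NULL;
}
#endif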

void nvmap_free(struct nvmap_client *client, struct nvmap_handle_ref *r)
{
        nvmap_free_handle_id(client, nvmap_ref_to_id(r));
}

/*
 * create a mapping to the user's buffer and write it
 * (uses logic similar to nvmap_reloc_pin_array to map the cmdbuf)
 */
int nvmap_patch_word(struct nvmap_client *client,
                                struct nvmap_handle *patch,
                                u32 patch_offset, u32 patch_value)
{
        phys_addr_t phys;
        unsigned long kaddr;
        unsigned int pfn;
        void *addr;
        pte_t **pte;
        pgprot_t prot;

        if (patch_offset >= patch->size) {
                nvmap_warn(client, "read/write outside of handle\n");
                return -EFAULT;
        }

        pte = nvmap_alloc_pte(client->dev, &addr);
        if (IS_ERR(pte))
                return PTR_ERR(pte);

        /* derive physaddr of cmdbuf WAIT to patch */
        if (patch->heap_pgalloc) {
                unsigned int page = patch_offset >> PAGE_SHIFT;
                phys = page_to_phys(patch->pgalloc.pages[page]);
                phys += (patch_offset & ~PAGE_MASK);
        } else {
                phys = patch->carveout->base + patch_offset;
        }

        pfn = __phys_to_pfn(phys);
        prot = nvmap_pgprot(patch, pgprot_kernel);
        kaddr = (unsigned long)addr;

        /* write PTE, so addr points to cmdbuf PFN */
        set_pte_at(&init_mm, kaddr, *pte, pfn_pte(pfn, prot));
        flush_tlb_kernel_page(kaddr);

        /* write patch_value to addr + page offset */
        __raw_writel(patch_value, addr + (phys & ~PAGE_MASK));

        nvmap_free_pte(client->dev, pte);
        wmb();
        return 0;
}
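
/*
 * Illustrative sketch only (not compiled into the driver): patching a
 * single word inside a command buffer handle with nvmap_patch_word(),
 * e.g. to update a WAIT value in place. The handle, offset and new value
 * are assumptions owned and validated by the caller.
 */
#if 0
static int example_patch_wait(struct nvmap_client *client,
                              struct nvmap_handle *cmdbuf,
                              u32 wait_offset, u32 new_value)
{
        /* fails with -EFAULT if wait_offset lies outside the handle */
        return nvmap_patch_word(client, cmdbuf, wait_offset, new_value);
}
#endif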