ARM: tegra: Use proper type for physical addresses
/*
 * drivers/video/tegra/nvmap/nvmap.c
 *
 * Memory manager for Tegra GPU
 *
 * Copyright (c) 2009-2011, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/rbtree.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include <mach/iovmm.h>
#include <mach/nvmap.h>

#include "nvmap.h"
#include "nvmap_mru.h"

/* private nvmap_handle flag for pinning duplicate detection */
#define NVMAP_HANDLE_VISITED (0x1ul << 31)

/* map the backing pages for a heap_pgalloc handle into its IOVMM area */
static void map_iovmm_area(struct nvmap_handle *h)
{
        tegra_iovmm_addr_t va;
        unsigned long i;

        BUG_ON(!h->heap_pgalloc || !h->pgalloc.area);
        BUG_ON(h->size & ~PAGE_MASK);
        WARN_ON(!h->pgalloc.dirty);

        for (va = h->pgalloc.area->iovm_start, i = 0;
             va < (h->pgalloc.area->iovm_start + h->size);
             i++, va += PAGE_SIZE) {
                BUG_ON(!pfn_valid(page_to_pfn(h->pgalloc.pages[i])));
                tegra_iovmm_vm_insert_pfn(h->pgalloc.area, va,
                                          page_to_pfn(h->pgalloc.pages[i]));
        }
        h->pgalloc.dirty = false;
}

/* must be called inside nvmap_pin_lock, to ensure that an entire stream
 * of pins will complete without racing with a second stream. handle should
 * have nvmap_handle_get (or nvmap_validate_get) called before calling
 * this function. */
static int pin_locked(struct nvmap_client *client, struct nvmap_handle *h)
{
        struct tegra_iovmm_area *area;
        BUG_ON(!h->alloc);

        if (atomic_inc_return(&h->pin) == 1) {
                if (h->heap_pgalloc && !h->pgalloc.contig) {
                        area = nvmap_handle_iovmm(client, h);
                        if (!area) {
                                /* no race here, inside the pin mutex */
                                atomic_dec(&h->pin);
                                return -ENOMEM;
                        }
                        if (area != h->pgalloc.area)
                                h->pgalloc.dirty = true;
                        h->pgalloc.area = area;
                }
        }
        return 0;
}

static int wait_pin_locked(struct nvmap_client *client, struct nvmap_handle *h)
{
        int ret = 0;

        ret = pin_locked(client, h);

        if (ret) {
                ret = wait_event_interruptible(client->share->pin_wait,
                                               !pin_locked(client, h));
        }

        return ret ? -EINTR : 0;
}

/* doesn't need to be called inside nvmap_pin_lock, since this will only
 * expand the available VM area */
static int handle_unpin(struct nvmap_client *client, struct nvmap_handle *h)
{
        int ret = 0;

        nvmap_mru_lock(client->share);

        if (atomic_read(&h->pin) == 0) {
                nvmap_err(client, "%s unpinning unpinned handle %p\n",
                          current->group_leader->comm, h);
                nvmap_mru_unlock(client->share);
                return 0;
        }

        BUG_ON(!h->alloc);

        if (!atomic_dec_return(&h->pin)) {
                if (h->heap_pgalloc && h->pgalloc.area) {
                        /* if a secure handle is clean (i.e., mapped into
                         * IOVMM), it needs to be zapped on unpin. */
                        if (h->secure && !h->pgalloc.dirty) {
                                tegra_iovmm_zap_vm(h->pgalloc.area);
                                h->pgalloc.dirty = true;
                        }
                        nvmap_mru_insert_locked(client->share, h);
                        ret = 1;
                }
        }

        nvmap_mru_unlock(client->share);

        nvmap_handle_put(h);
        return ret;
}

static int handle_unpin_noref(struct nvmap_client *client, unsigned long id)
{
        struct nvmap_handle *h;
        int w;

        h = nvmap_validate_get(client, id);
        if (unlikely(!h)) {
                nvmap_err(client, "%s attempting to unpin invalid handle %p\n",
                          current->group_leader->comm, (void *)id);
                return 0;
        }

        nvmap_err(client, "%s unpinning unreferenced handle %p\n",
                  current->group_leader->comm, h);
        WARN_ON(1);

        w = handle_unpin(client, h);
        nvmap_handle_put(h);
        return w;
}

void nvmap_unpin_ids(struct nvmap_client *client,
                     unsigned int nr, const unsigned long *ids)
{
        unsigned int i;
        int do_wake = 0;

        for (i = 0; i < nr; i++) {
                struct nvmap_handle_ref *ref;

                if (!ids[i])
                        continue;

                nvmap_ref_lock(client);
                ref = _nvmap_validate_id_locked(client, ids[i]);
                if (ref) {
                        struct nvmap_handle *h = ref->handle;
                        int e = atomic_add_unless(&ref->pin, -1, 0);

                        nvmap_ref_unlock(client);

                        if (!e) {
                                nvmap_err(client, "%s unpinning unpinned "
                                          "handle %08lx\n",
                                          current->group_leader->comm, ids[i]);
                        } else {
                                do_wake |= handle_unpin(client, h);
                        }
                } else {
                        nvmap_ref_unlock(client);
                        if (client->super)
                                do_wake |= handle_unpin_noref(client, ids[i]);
                        else
                                nvmap_err(client, "%s unpinning invalid "
                                          "handle %08lx\n",
                                          current->group_leader->comm, ids[i]);
                }
        }

        if (do_wake)
                wake_up(&client->share->pin_wait);
}

/* pins a list of handle_ref objects; same conditions apply as to
 * _nvmap_handle_pin, but also bumps the pin count of each handle_ref. */
int nvmap_pin_ids(struct nvmap_client *client,
                  unsigned int nr, const unsigned long *ids)
{
        int ret = 0;
        int cnt = 0;
        unsigned int i;
        struct nvmap_handle **h = (struct nvmap_handle **)ids;
        struct nvmap_handle_ref *ref;

        /* to optimize for the common case (client provided valid handle
         * references and the pin succeeds), increment the handle_ref pin
         * count during validation. in error cases, the tree will need to
         * be re-walked, since the handle_ref is discarded so that an
         * allocation isn't required. if a handle_ref is not found,
         * locally validate that the caller has permission to pin the handle;
         * handle_refs are not created in this case, so it is possible that
         * if the caller crashes after pinning a global handle, the handle
         * will be permanently leaked. */
        nvmap_ref_lock(client);
        for (i = 0; i < nr && !ret; i++) {
                ref = _nvmap_validate_id_locked(client, ids[i]);
                if (ref) {
                        atomic_inc(&ref->pin);
                        nvmap_handle_get(h[i]);
                } else {
                        struct nvmap_handle *verify;
                        nvmap_ref_unlock(client);
                        verify = nvmap_validate_get(client, ids[i]);
                        if (verify)
                                nvmap_warn(client, "%s pinning unreferenced "
                                           "handle %p\n",
                                           current->group_leader->comm, h[i]);
                        else
                                ret = -EPERM;
                        nvmap_ref_lock(client);
                }
        }
        nvmap_ref_unlock(client);

        nr = i;

        if (ret)
                goto out;

        ret = mutex_lock_interruptible(&client->share->pin_lock);
        if (WARN_ON(ret))
                goto out;

        for (cnt = 0; cnt < nr && !ret; cnt++) {
                ret = wait_pin_locked(client, h[cnt]);
        }
        mutex_unlock(&client->share->pin_lock);

        if (ret) {
                int do_wake = 0;

                for (i = 0; i < cnt; i++)
                        do_wake |= handle_unpin(client, h[i]);

                if (do_wake)
                        wake_up(&client->share->pin_wait);

                ret = -EINTR;
        } else {
                for (i = 0; i < nr; i++) {
                        if (h[i]->heap_pgalloc && h[i]->pgalloc.dirty)
                                map_iovmm_area(h[i]);
                }
        }

out:
        if (ret) {
                nvmap_ref_lock(client);
                for (i = 0; i < nr; i++) {
                        ref = _nvmap_validate_id_locked(client, ids[i]);
                        if (!ref) {
                                nvmap_warn(client, "%s freed handle %p "
                                           "during pinning\n",
                                           current->group_leader->comm,
                                           (void *)ids[i]);
                                continue;
                        }
                        atomic_dec(&ref->pin);
                }
                nvmap_ref_unlock(client);

                for (i = cnt; i < nr; i++)
                        nvmap_handle_put(h[i]);
        }

        return ret;
}
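
/* Example (editor's sketch, not part of the original driver): a kernel
 * caller holding valid handle ids for 'client' could pin and later unpin
 * them as follows; 'client', 'id_a' and 'id_b' are assumed to come from the
 * surrounding ioctl plumbing and are not defined here.
 *
 *      unsigned long ids[2] = { id_a, id_b };
 *      int err;
 *
 *      err = nvmap_pin_ids(client, 2, ids);
 *      if (err)
 *              return err;             // -EINTR or -EPERM
 *
 *      // ... program the hardware with the pinned buffers ...
 *
 *      nvmap_unpin_ids(client, 2, ids);
 */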

static phys_addr_t handle_phys(struct nvmap_handle *h)
{
        phys_addr_t addr;

        if (h->heap_pgalloc && h->pgalloc.contig) {
                addr = page_to_phys(h->pgalloc.pages[0]);
        } else if (h->heap_pgalloc) {
                BUG_ON(!h->pgalloc.area);
                addr = h->pgalloc.area->iovm_start;
        } else {
                addr = h->carveout->base;
        }

        return addr;
}

/* stores the physical address (+offset) of each handle relocation entry
 * into its output location. see nvmap_pin_array for more details.
 *
 * each entry in arr (i.e., each relocation request) specifies two handles:
 * the handle to pin (pin), and the handle where the address of pin should be
 * written (patch). in pseudocode, this loop basically looks like:
 *
 * for (i = 0; i < nr; i++) {
 *     (pin, pin_offset, patch, patch_offset) = arr[i];
 *     patch[patch_offset] = address_of(pin) + pin_offset;
 * }
 */
static int nvmap_reloc_pin_array(struct nvmap_client *client,
                                 const struct nvmap_pinarray_elem *arr,
                                 int nr, struct nvmap_handle *gather)
{
        struct nvmap_handle *last_patch = NULL;
        unsigned int last_pfn = 0;
        pte_t **pte;
        void *addr;
        int i;

        pte = nvmap_alloc_pte(client->dev, &addr);
        if (IS_ERR(pte))
                return PTR_ERR(pte);

        for (i = 0; i < nr; i++) {
                struct nvmap_handle *patch;
                struct nvmap_handle *pin;
                phys_addr_t reloc_addr;
                phys_addr_t phys;
                unsigned int pfn;

                /* all of the handles are validated and get'ted prior to
                 * calling this function, so casting is safe here */
                pin = (struct nvmap_handle *)arr[i].pin_mem;

                if (arr[i].patch_mem == (unsigned long)last_patch) {
                        patch = last_patch;
                } else if (arr[i].patch_mem == (unsigned long)gather) {
                        patch = gather;
                } else {
                        if (last_patch)
                                nvmap_handle_put(last_patch);

                        patch = nvmap_get_handle_id(client, arr[i].patch_mem);
                        if (!patch) {
                                nvmap_free_pte(client->dev, pte);
                                return -EPERM;
                        }
                        last_patch = patch;
                }

                if (patch->heap_pgalloc) {
                        unsigned int page = arr[i].patch_offset >> PAGE_SHIFT;
                        phys = page_to_phys(patch->pgalloc.pages[page]);
                        phys += (arr[i].patch_offset & ~PAGE_MASK);
                } else {
                        phys = patch->carveout->base + arr[i].patch_offset;
                }

                pfn = __phys_to_pfn(phys);
                if (pfn != last_pfn) {
                        pgprot_t prot = nvmap_pgprot(patch, pgprot_kernel);
                        /* kaddr is a kernel virtual address; the page-table
                         * helpers take unsigned long, not phys_addr_t */
                        unsigned long kaddr = (unsigned long)addr;
                        set_pte_at(&init_mm, kaddr, *pte, pfn_pte(pfn, prot));
                        flush_tlb_kernel_page(kaddr);
                        last_pfn = pfn;
                }

                reloc_addr = handle_phys(pin) + arr[i].pin_offset;
                __raw_writel(reloc_addr, addr + (phys & ~PAGE_MASK));
        }

        nvmap_free_pte(client->dev, pte);

        if (last_patch)
                nvmap_handle_put(last_patch);

        wmb();

        return 0;
}
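
/* Example (editor's sketch, not part of the original driver): one relocation
 * entry as consumed by nvmap_reloc_pin_array() above. The field names follow
 * their use in this file; 'cmdbuf_id' and 'surface_id' are hypothetical
 * handle ids owned by the submitting client.
 *
 *      struct nvmap_pinarray_elem elem = {
 *              .patch_mem    = cmdbuf_id,   // handle whose contents get patched
 *              .patch_offset = 0x30,        // byte offset of the word to patch
 *              .pin_mem      = surface_id,  // handle whose address is written
 *              .pin_offset   = 0x1000,      // offset added to that address
 *      };
 *
 * after the relocation pass, the word at offset 0x30 inside cmdbuf_id holds
 * handle_phys(surface_id) + 0x1000.
 */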

static int nvmap_validate_get_pin_array(struct nvmap_client *client,
                                        const struct nvmap_pinarray_elem *arr,
                                        int nr, struct nvmap_handle **h)
{
        int i;
        int ret = 0;
        int count = 0;

        nvmap_ref_lock(client);

        for (i = 0; i < nr; i++) {
                struct nvmap_handle_ref *ref;

                if (need_resched()) {
                        nvmap_ref_unlock(client);
                        schedule();
                        nvmap_ref_lock(client);
                }

                ref = _nvmap_validate_id_locked(client, arr[i].pin_mem);

                if (!ref)
                        nvmap_warn(client, "failed to validate id\n");
                else if (!ref->handle)
                        nvmap_warn(client, "id had no associated handle\n");
                else if (!ref->handle->alloc)
                        nvmap_warn(client, "handle had no allocation\n");

                if (!ref || !ref->handle || !ref->handle->alloc) {
                        ret = -EPERM;
                        break;
                }

                /* a handle may be referenced multiple times in arr, but
                 * it will only be pinned once; this ensures that the
                 * minimum number of sync-queue slots in the host driver
                 * are dedicated to storing unpin lists, which allows
                 * for greater parallelism between the CPU and graphics
                 * processor */
                if (ref->handle->flags & NVMAP_HANDLE_VISITED)
                        continue;

                ref->handle->flags |= NVMAP_HANDLE_VISITED;

                h[count] = nvmap_handle_get(ref->handle);
                BUG_ON(!h[count]);
                count++;
        }

        nvmap_ref_unlock(client);

        if (ret) {
                for (i = 0; i < count; i++) {
                        h[i]->flags &= ~NVMAP_HANDLE_VISITED;
                        nvmap_handle_put(h[i]);
                }
        }

        return ret ?: count;
}

/* a typical way for host1x clients to use the Tegra graphics
 * processor is to build a command buffer which contains relocatable
 * memory handle commands, and rely on the kernel to convert these in-place
 * to addresses which are understood by the GPU hardware.
 *
 * this is implemented by having clients provide a sideband array
 * of relocatable handles (+ offsets) and the location in the command
 * buffer handle to patch with the GPU address when the client submits
 * its command buffer to the host1x driver.
 *
 * the host driver also uses this relocation mechanism internally to
 * relocate the client's (unpinned) command buffers into host-addressable
 * memory.
 *
 * @client: nvmap_client which should be used for validation; should be
 *          owned by the process which is submitting command buffers
 * @gather: special handle for relocated command buffer outputs used
 *          internally by the host driver. if this handle is encountered
 *          as an output handle in the relocation array, it is assumed
 *          to be a known-good output and is not validated.
 * @arr:    array of ((relocatable handle, offset), (output handle, offset))
 *          tuples.
 * @nr:     number of entries in arr
 * @unique_arr: list of nvmap_handle objects which were pinned by
 *              nvmap_pin_array. must be unpinned by the caller after the
 *              command buffers referenced in gather have completed.
 */
int nvmap_pin_array(struct nvmap_client *client, struct nvmap_handle *gather,
                    const struct nvmap_pinarray_elem *arr, int nr,
                    struct nvmap_handle **unique_arr)
{
        int count = 0;
        int pinned = 0;
        int ret = 0;
        int i;

        if (mutex_lock_interruptible(&client->share->pin_lock)) {
                nvmap_warn(client, "%s interrupted when acquiring pin lock\n",
                           current->group_leader->comm);
                return -EINTR;
        }

        count = nvmap_validate_get_pin_array(client, arr, nr, unique_arr);
        if (count < 0) {
                mutex_unlock(&client->share->pin_lock);
                nvmap_warn(client, "failed to validate pin array\n");
                return count;
        }

        for (i = 0; i < count; i++)
                unique_arr[i]->flags &= ~NVMAP_HANDLE_VISITED;

        for (pinned = 0; pinned < count && !ret; pinned++)
                ret = wait_pin_locked(client, unique_arr[pinned]);

        mutex_unlock(&client->share->pin_lock);

        if (!ret)
                ret = nvmap_reloc_pin_array(client, arr, nr, gather);

        if (WARN_ON(ret)) {
                int do_wake = 0;

                for (i = pinned; i < count; i++)
                        nvmap_handle_put(unique_arr[i]);

                for (i = 0; i < pinned; i++)
                        do_wake |= handle_unpin(client, unique_arr[i]);

                if (do_wake)
                        wake_up(&client->share->pin_wait);

                return ret;
        } else {
                for (i = 0; i < count; i++) {
                        if (unique_arr[i]->heap_pgalloc &&
                            unique_arr[i]->pgalloc.dirty)
                                map_iovmm_area(unique_arr[i]);
                }
        }

        return count;
}
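
/* Example (editor's sketch, not part of the original driver): a host1x-style
 * submit path might drive nvmap_pin_array() roughly as follows. 'client',
 * 'gather', 'arr' and 'nr' are assumed to have been prepared by the submit
 * ioctl, and MAX_RELOCS is a hypothetical bound on unique handles per submit.
 *
 *      struct nvmap_handle *unique[MAX_RELOCS];
 *      int count;
 *
 *      count = nvmap_pin_array(client, gather, arr, nr, unique);
 *      if (count < 0)
 *              return count;           // -EINTR, -EPERM, ...
 *
 *      // ... submit the patched command buffers to the hardware
 *      // ... and wait for (or fence) their completion ...
 *
 *      nvmap_unpin_handles(client, unique, count);
 */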

phys_addr_t nvmap_pin(struct nvmap_client *client,
                        struct nvmap_handle_ref *ref)
{
        struct nvmap_handle *h;
        phys_addr_t phys;
        int ret = 0;

        h = nvmap_handle_get(ref->handle);
        if (WARN_ON(!h))
                return -EINVAL;

        atomic_inc(&ref->pin);

        if (WARN_ON(mutex_lock_interruptible(&client->share->pin_lock))) {
                ret = -EINTR;
        } else {
                ret = wait_pin_locked(client, h);
                mutex_unlock(&client->share->pin_lock);
        }

        if (ret) {
                atomic_dec(&ref->pin);
                nvmap_handle_put(h);
        } else {
                if (h->heap_pgalloc && h->pgalloc.dirty)
                        map_iovmm_area(h);
                phys = handle_phys(h);
        }

        return ret ?: phys;
}

phys_addr_t nvmap_handle_address(struct nvmap_client *c, unsigned long id)
{
        struct nvmap_handle *h;
        phys_addr_t phys;

        h = nvmap_get_handle_id(c, id);
        if (!h)
                return -EPERM;
        mutex_lock(&h->lock);
        phys = handle_phys(h);
        mutex_unlock(&h->lock);
        nvmap_handle_put(h);

        return phys;
}

void nvmap_unpin(struct nvmap_client *client, struct nvmap_handle_ref *ref)
{
        atomic_dec(&ref->pin);
        if (handle_unpin(client, ref->handle))
                wake_up(&client->share->pin_wait);
}

void nvmap_unpin_handles(struct nvmap_client *client,
                         struct nvmap_handle **h, int nr)
{
        int i;
        int do_wake = 0;

        for (i = 0; i < nr; i++) {
                if (WARN_ON(!h[i]))
                        continue;
                do_wake |= handle_unpin(client, h[i]);
        }

        if (do_wake)
                wake_up(&client->share->pin_wait);
}

void *nvmap_mmap(struct nvmap_handle_ref *ref)
{
        struct nvmap_handle *h;
        pgprot_t prot;
        unsigned long adj_size;
        unsigned long offs;
        struct vm_struct *v;
        void *p;

        h = nvmap_handle_get(ref->handle);
        if (!h)
                return NULL;

        prot = nvmap_pgprot(h, pgprot_kernel);

        if (h->heap_pgalloc)
                return vm_map_ram(h->pgalloc.pages, h->size >> PAGE_SHIFT,
                                  -1, prot);

        /* carveout - explicitly map the pfns into a vmalloc area */

        nvmap_usecount_inc(h);

        adj_size = h->carveout->base & ~PAGE_MASK;
        adj_size += h->size;
        adj_size = PAGE_ALIGN(adj_size);

        v = alloc_vm_area(adj_size);
        if (!v) {
                nvmap_usecount_dec(h);
                nvmap_handle_put(h);
                return NULL;
        }

        p = v->addr + (h->carveout->base & ~PAGE_MASK);

        for (offs = 0; offs < adj_size; offs += PAGE_SIZE) {
                unsigned long addr = (unsigned long) v->addr + offs;
                unsigned int pfn;
                pgd_t *pgd;
                pud_t *pud;
                pmd_t *pmd;
                pte_t *pte;

                pfn = __phys_to_pfn(h->carveout->base + offs);
                pgd = pgd_offset_k(addr);
                pud = pud_alloc(&init_mm, pgd, addr);
                if (!pud)
                        break;
                pmd = pmd_alloc(&init_mm, pud, addr);
                if (!pmd)
                        break;
                pte = pte_alloc_kernel(pmd, addr);
                if (!pte)
                        break;
                set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
                flush_tlb_kernel_page(addr);
        }

        if (offs != adj_size) {
                free_vm_area(v);
                nvmap_usecount_dec(h);
                nvmap_handle_put(h);
                return NULL;
        }

        /* leave the handle ref count incremented by 1, so that
         * the handle will not be freed while the kernel mapping exists.
         * nvmap_handle_put will be called by unmapping this address */
        return p;
}

void nvmap_munmap(struct nvmap_handle_ref *ref, void *addr)
{
        struct nvmap_handle *h;

        if (!ref)
                return;

        h = ref->handle;

        if (h->heap_pgalloc) {
                vm_unmap_ram(addr, h->size >> PAGE_SHIFT);
        } else {
                struct vm_struct *vm;
                addr -= (h->carveout->base & ~PAGE_MASK);
                vm = remove_vm_area(addr);
                BUG_ON(!vm);
                nvmap_usecount_dec(h);
        }
        nvmap_handle_put(h);
}
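
/* Example (editor's sketch, not part of the original driver): pairing
 * nvmap_mmap() and nvmap_munmap() for a temporary kernel-side view of a
 * handle; 'ref' is assumed to be a reference returned by nvmap_alloc()
 * below, and 'len' must not exceed the handle size.
 *
 *      void *va = nvmap_mmap(ref);
 *      if (!va)
 *              return -ENOMEM;
 *
 *      memset(va, 0, len);             // CPU access through the mapping
 *
 *      nvmap_munmap(ref, va);          // drops the extra handle reference
 *                                      // taken by nvmap_mmap()
 */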

struct nvmap_handle_ref *nvmap_alloc(struct nvmap_client *client, size_t size,
                                     size_t align, unsigned int flags)
{
        const unsigned int default_heap = (NVMAP_HEAP_SYSMEM |
                                           NVMAP_HEAP_CARVEOUT_GENERIC);
        struct nvmap_handle_ref *r = NULL;
        int err;

        r = nvmap_create_handle(client, size);
        if (IS_ERR(r))
                return r;

        err = nvmap_alloc_handle_id(client, nvmap_ref_to_id(r),
                                    default_heap, align, flags);

        if (err) {
                nvmap_free_handle_id(client, nvmap_ref_to_id(r));
                return ERR_PTR(err);
        }

        return r;
}

void nvmap_free(struct nvmap_client *client, struct nvmap_handle_ref *r)
{
        nvmap_free_handle_id(client, nvmap_ref_to_id(r));
}
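
/* Example (editor's sketch, not part of the original driver): the in-kernel
 * buffer lifecycle built from the helpers above. The client is assumed to
 * have been created elsewhere in the driver, the caching flag name is taken
 * from the nvmap headers, and error handling is abbreviated.
 *
 *      struct nvmap_handle_ref *r;
 *      phys_addr_t phys;
 *
 *      r = nvmap_alloc(client, SZ_64K, SZ_4K, NVMAP_HANDLE_WRITE_COMBINE);
 *      if (IS_ERR(r))
 *              return PTR_ERR(r);
 *
 *      phys = nvmap_pin(client, r);    // address to program into hardware
 *      // ... hardware reads/writes the buffer ...
 *      nvmap_unpin(client, r);
 *
 *      nvmap_free(client, r);
 */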