arm: tegra: nvmap: Update nvmap_alloc api.
[linux-2.6.git] drivers/gpu/ion/tegra/tegra_ion.c
/*
 * drivers/gpu/ion/tegra/tegra_ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (C) 2011, NVIDIA Corporation.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt)     "%s():%d: " fmt, __func__, __LINE__

#include <linux/err.h>
#include <linux/ion.h>
#include <linux/tegra_ion.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/io.h>
#include "../ion_priv.h"

#define CLIENT_HEAP_MASK 0xFFFFFFFF
#define HEAP_FLAGS 0xFF

#if !defined(CONFIG_TEGRA_NVMAP)
#include "mach/nvmap.h"
struct nvmap_device *nvmap_dev;
#endif

static struct ion_device *idev;
static int num_heaps;
static struct ion_heap **heaps;

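/*
 * Handle the TEGRA_ION_PIN and TEGRA_ION_UNPIN custom ioctls: copy the
 * user-supplied handle array in, validate every handle against the client,
 * then either look up and return the physical address of each buffer while
 * taking an extra handle reference (pin), or drop that reference (unpin).
 */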
static int tegra_ion_pin(struct ion_client *client,
                                unsigned int cmd,
                                unsigned long arg)
{
        struct tegra_ion_pin_data data;
        int ret = 0;
        struct ion_handle *on_stack[16];
        struct ion_handle **refs = on_stack;
        int i;
        bool valid_handle;

        if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
                return -EFAULT;
        if (data.count) {
                size_t bytes = data.count * sizeof(struct ion_handle *);

                if (data.count > ARRAY_SIZE(on_stack))
                        refs = kmalloc(bytes, GFP_KERNEL);
                else
                        refs = on_stack;
                if (!refs)
                        return -ENOMEM;
                if (copy_from_user(refs, (void *)data.handles, bytes)) {
                        ret = -EFAULT;
                        goto err;
                }
        } else
                return -EINVAL;

        mutex_lock(&client->lock);
        for (i = 0; i < data.count; i++) {
                /* Ignore NULL pointers during unpin operation. */
                if (!refs[i] && cmd == TEGRA_ION_UNPIN)
                        continue;
                valid_handle = ion_handle_validate(client, refs[i]);
                if (!valid_handle) {
                        WARN(1, "invalid handle passed h=0x%x", (u32)refs[i]);
                        mutex_unlock(&client->lock);
                        ret = -EINVAL;
                        goto err;
                }
        }
        mutex_unlock(&client->lock);

        if (cmd == TEGRA_ION_PIN) {
                ion_phys_addr_t addr;
                size_t len;

                for (i = 0; i < data.count; i++) {
                        ret = ion_phys(client, refs[i], &addr, &len);
                        if (ret)
                                goto err;
                        ion_handle_get(refs[i]);
                        ret = put_user(addr, &data.addr[i]);
                        if (ret)
                                goto err;
                }
        } else if (cmd == TEGRA_ION_UNPIN) {
                for (i = 0; i < data.count; i++) {
                        if (refs[i])
                                ion_handle_put(refs[i]);
                }
        }

err:
        if (ret) {
                pr_err("error, ret=0x%x", ret);
                /* FIXME: undo pinning. */
        }
        if (refs != on_stack)
                kfree(refs);
        return ret;
}

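/*
 * TEGRA_ION_ALLOC_FROM_ID: the "id" passed in from user space is the kernel
 * address of an ion_buffer; import it into this client and hand the new
 * handle and the buffer size back to the caller.
 */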
static int tegra_ion_alloc_from_id(struct ion_client *client,
                                    unsigned int cmd,
                                    unsigned long arg)
{
        struct tegra_ion_id_data data;
        struct ion_buffer *buffer;
        struct tegra_ion_id_data *user_data = (struct tegra_ion_id_data *)arg;

        if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
                return -EFAULT;
        buffer = (struct ion_buffer *)data.id;
        data.handle = ion_import(client, buffer);
        if (IS_ERR_OR_NULL(data.handle))
                return -EINVAL;
        data.size = buffer->size;
        if (put_user(data.handle, &user_data->handle))
                return -EFAULT;
        if (put_user(data.size, &user_data->size))
                return -EFAULT;
        return 0;
}

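/*
 * TEGRA_ION_GET_ID: translate a client handle into the global buffer "id"
 * (the ion_buffer pointer) that other clients can import.
 */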
static int tegra_ion_get_id(struct ion_client *client,
                                            unsigned int cmd,
                                            unsigned long arg)
{
        bool valid_handle;
        struct tegra_ion_id_data data;
        struct tegra_ion_id_data *user_data = (struct tegra_ion_id_data *)arg;

        if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
                return -EFAULT;

        mutex_lock(&client->lock);
        valid_handle = ion_handle_validate(client, data.handle);
        mutex_unlock(&client->lock);

        if (!valid_handle) {
                WARN(1, "invalid handle passed\n");
                return -EINVAL;
        }

        pr_debug("h=0x%x, b=0x%x, bref=%d",
                (u32)data.handle, (u32)data.handle->buffer,
                atomic_read(&data.handle->buffer->ref.refcount));
        if (put_user((unsigned long)ion_handle_buffer(data.handle),
                        &user_data->id))
                return -EFAULT;
        return 0;
}

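/*
 * TEGRA_ION_CACHE_MAINT: no per-range cache maintenance is performed here;
 * only a write barrier is issued.
 */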
static int tegra_ion_cache_maint(struct ion_client *client,
                                 unsigned int cmd,
                                 unsigned long arg)
{
        wmb();
        return 0;
}

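/*
 * TEGRA_ION_READ/TEGRA_ION_WRITE: strided copy between a user buffer and a
 * kernel mapping of the ion buffer. When the element size equals both the
 * memory and user strides, the transfer is collapsed into a single
 * contiguous copy.
 */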
static int tegra_ion_rw(struct ion_client *client,
                                unsigned int cmd,
                                unsigned long arg)
{
        bool valid_handle;
        struct tegra_ion_rw_data data;
        char *kern_addr, *src;
        int ret = 0;
        size_t copied = 0;

        if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
                return -EFAULT;

        if (!data.handle || !data.addr || !data.count || !data.elem_size)
                return -EINVAL;

        mutex_lock(&client->lock);
        valid_handle = ion_handle_validate(client, data.handle);
        mutex_unlock(&client->lock);

        if (!valid_handle) {
                WARN(1, "%s: invalid handle passed to read/write.\n", __func__);
                return -EINVAL;
        }

        if (data.elem_size == data.mem_stride &&
            data.elem_size == data.user_stride) {
                data.elem_size *= data.count;
                data.mem_stride = data.elem_size;
                data.user_stride = data.elem_size;
                data.count = 1;
        }

        kern_addr = ion_map_kernel(client, data.handle);
        if (IS_ERR_OR_NULL(kern_addr))
                return -ENOMEM;

        while (data.count--) {
                if (data.offset + data.elem_size > data.handle->buffer->size) {
                        WARN(1, "read/write outside of handle\n");
                        ret = -EFAULT;
                        break;
                }

                src = kern_addr + data.offset;
                if (cmd == TEGRA_ION_READ)
                        ret = copy_to_user((void __user *)data.addr,
                                            src, data.elem_size);
                else
                        ret = copy_from_user(src,
                                            (void __user *)data.addr,
                                            data.elem_size);

                if (ret) {
                        /* copy_{to,from}_user return bytes not copied. */
                        ret = -EFAULT;
                        break;
                }

                copied += data.elem_size;
                data.addr += data.user_stride;
                data.offset += data.mem_stride;
        }

        ion_unmap_kernel(client, data.handle);
        return ret;
}

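/*
 * TEGRA_ION_GET_PARAM: report buffer parameters back to user space. The
 * alignment and heap fields are currently hard-coded (4096 / heap 1); only
 * the physical address and size are queried from ion.
 */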
static int tegra_ion_get_param(struct ion_client *client,
                                        unsigned int cmd,
                                        unsigned long arg)
{
        bool valid_handle;
        struct tegra_ion_get_params_data data;
        struct tegra_ion_get_params_data *user_data =
                                (struct tegra_ion_get_params_data *)arg;
        struct ion_buffer *buffer;

        if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
                return -EFAULT;

        mutex_lock(&client->lock);
        valid_handle = ion_handle_validate(client, data.handle);
        mutex_unlock(&client->lock);

        if (!valid_handle) {
                WARN(1, "%s: invalid handle passed to get params.\n", __func__);
                return -EINVAL;
        }

        buffer = ion_handle_buffer(data.handle);
        data.align = 4096;
        data.heap = 1;
        ion_phys(client, data.handle, &data.addr, &data.size);

        if (copy_to_user(user_data, &data, sizeof(data)))
                return -EFAULT;

        return 0;
}

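/* Dispatcher for the tegra-specific custom ioctls registered with ion. */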
static long tegra_ion_ioctl(struct ion_client *client,
                                   unsigned int cmd,
                                   unsigned long arg)
{
        int ret = -ENOTTY;

        switch (cmd) {
        case TEGRA_ION_ALLOC_FROM_ID:
                ret = tegra_ion_alloc_from_id(client, cmd, arg);
                break;
        case TEGRA_ION_GET_ID:
                ret = tegra_ion_get_id(client, cmd, arg);
                break;
        case TEGRA_ION_PIN:
        case TEGRA_ION_UNPIN:
                ret = tegra_ion_pin(client, cmd, arg);
                break;
        case TEGRA_ION_CACHE_MAINT:
                ret = tegra_ion_cache_maint(client, cmd, arg);
                break;
        case TEGRA_ION_READ:
        case TEGRA_ION_WRITE:
                ret = tegra_ion_rw(client, cmd, arg);
                break;
        case TEGRA_ION_GET_PARAM:
                ret = tegra_ion_get_param(client, cmd, arg);
                break;
        default:
                WARN(1, "Unknown custom ioctl\n");
                return -ENOTTY;
        }
        return ret;
}

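/*
 * Create the ion device and one heap per entry in the platform data
 * supplied by the board file, then publish the device as the nvmap device
 * when the nvmap compatibility layer below is in use.
 */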
int tegra_ion_probe(struct platform_device *pdev)
{
        struct ion_platform_data *pdata = pdev->dev.platform_data;
        int i;

        num_heaps = pdata->nr;

        heaps = kzalloc(sizeof(struct ion_heap *) * pdata->nr, GFP_KERNEL);
        if (!heaps)
                return -ENOMEM;

        idev = ion_device_create(tegra_ion_ioctl);
        if (IS_ERR_OR_NULL(idev)) {
                kfree(heaps);
                return PTR_ERR(idev);
        }

        /* create the heaps as specified in the board file */
        for (i = 0; i < num_heaps; i++) {
                struct ion_platform_heap *heap_data = &pdata->heaps[i];

                heaps[i] = ion_heap_create(heap_data);
                if (IS_ERR_OR_NULL(heaps[i])) {
                        pr_warn("%s(type:%d id:%d) isn't supported\n",
                                heap_data->name,
                                heap_data->type, heap_data->id);
                        continue;
                }
                ion_device_add_heap(idev, heaps[i]);
        }
        platform_set_drvdata(pdev, idev);
#if !defined(CONFIG_TEGRA_NVMAP)
        nvmap_dev = (struct nvmap_device *)idev;
#endif
        return 0;
}

int tegra_ion_remove(struct platform_device *pdev)
{
        struct ion_device *idev = platform_get_drvdata(pdev);
        int i;

        ion_device_destroy(idev);
        for (i = 0; i < num_heaps; i++)
                ion_heap_destroy(heaps[i]);
        kfree(heaps);
        return 0;
}

static struct platform_driver ion_driver = {
        .probe = tegra_ion_probe,
        .remove = tegra_ion_remove,
        .driver = { .name = "ion-tegra" }
};

static int __init ion_init(void)
{
        return platform_driver_register(&ion_driver);
}

static void __exit ion_exit(void)
{
        platform_driver_unregister(&ion_driver);
}

fs_initcall(ion_init);
module_exit(ion_exit);

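/*
 * nvmap compatibility layer: when CONFIG_TEGRA_NVMAP is not set, the nvmap
 * kernel API is emulated on top of ion. nvmap clients, handles and handle
 * references are treated as their ion counterparts and cast back and forth
 * between the two APIs.
 */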
#if !defined(CONFIG_TEGRA_NVMAP)
struct nvmap_client *nvmap_create_client(struct nvmap_device *dev,
                                         const char *name)
{
        return ion_client_create(dev, CLIENT_HEAP_MASK, name);
}

struct nvmap_handle_ref *nvmap_alloc(struct nvmap_client *client, size_t size,
                                     size_t align, unsigned int flags,
                                     unsigned int heap_mask)
{
        return ion_alloc(client, size, align, HEAP_FLAGS);
}

void nvmap_free(struct nvmap_client *client, struct nvmap_handle_ref *r)
{
        ion_free(client, r);
}

void *nvmap_mmap(struct nvmap_handle_ref *r)
{
        return ion_map_kernel(r->client, r);
}

void nvmap_munmap(struct nvmap_handle_ref *r, void *addr)
{
        ion_unmap_kernel(r->client, r);
}

struct nvmap_client *nvmap_client_get_file(int fd)
{
        return ion_client_get_file(fd);
}

struct nvmap_client *nvmap_client_get(struct nvmap_client *client)
{
        ion_client_get(client);
        return client;
}

void nvmap_client_put(struct nvmap_client *c)
{
        ion_client_put(c);
}

phys_addr_t nvmap_pin(struct nvmap_client *c, struct nvmap_handle_ref *r)
{
        ion_phys_addr_t addr;
        size_t len;

        ion_handle_get(r);
        ion_phys(c, r, &addr, &len);
        wmb();
        return addr;
}

phys_addr_t nvmap_handle_address(struct nvmap_client *c, unsigned long id)
{
        struct ion_handle *handle;
        ion_phys_addr_t addr;
        size_t len;

        handle = nvmap_convert_handle_u2k(id);
        ion_phys(c, handle, &addr, &len);
        return addr;
}

void nvmap_unpin(struct nvmap_client *client, struct nvmap_handle_ref *r)
{
        if (r)
                ion_handle_put(r);
}

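#if 0
/*
 * Illustrative sketch only (never compiled): one plausible way an in-kernel
 * caller could exercise the wrappers above, assuming nvmap_dev has already
 * been set up by tegra_ion_probe(). The client name and sizes below are
 * hypothetical.
 */
static int tegra_ion_nvmap_example(void)
{
        struct nvmap_client *client;
        struct nvmap_handle_ref *ref;
        phys_addr_t addr;

        client = nvmap_create_client(nvmap_dev, "example");
        if (IS_ERR_OR_NULL(client))
                return -ENOMEM;

        /* Allocate a page-sized, page-aligned buffer from any heap. */
        ref = nvmap_alloc(client, 4096, 4096, 0, CLIENT_HEAP_MASK);
        if (IS_ERR_OR_NULL(ref)) {
                nvmap_client_put(client);
                return -ENOMEM;
        }

        /* Pin to obtain a physical address, then release everything. */
        addr = nvmap_pin(client, ref);
        pr_debug("pinned at 0x%x\n", (u32)addr);

        nvmap_unpin(client, ref);
        nvmap_free(client, ref);
        nvmap_client_put(client);
        return 0;
}
#endif
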
static int nvmap_reloc_pin_array(struct ion_client *client,
                                 const struct nvmap_pinarray_elem *arr,
                                 int nr, struct ion_handle *gather)
{
        struct ion_handle *last_patch = NULL;
        void *patch_addr;
        ion_phys_addr_t pin_addr;
        size_t len;
        int i;

        for (i = 0; i < nr; i++) {
                struct ion_handle *patch;
                struct ion_handle *pin;
                ion_phys_addr_t reloc_addr;

                /* all of the handles are validated and get'ted prior to
                 * calling this function, so casting is safe here */
                pin = (struct ion_handle *)arr[i].pin_mem;

                if (arr[i].patch_mem == (unsigned long)last_patch) {
                        patch = last_patch;
                } else if (arr[i].patch_mem == (unsigned long)gather) {
                        patch = gather;
                } else {
                        if (last_patch)
                                ion_handle_put(last_patch);

                        patch = (struct ion_handle *)arr[i].patch_mem;
                        if (!patch)
                                return -EPERM;
                        ion_handle_get(patch);
                        last_patch = patch;
                }

                patch_addr = ion_map_kernel(client, patch);
                patch_addr = patch_addr + arr[i].patch_offset;

                ion_phys(client, pin, &pin_addr, &len);
                reloc_addr = pin_addr + arr[i].pin_offset;
                __raw_writel(reloc_addr, patch_addr);
                ion_unmap_kernel(client, patch);
        }

        if (last_patch)
                ion_handle_put(last_patch);

        wmb();
        return 0;
}

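/*
 * Pin every entry in the array, record the handles in @unique, then patch
 * the relocations. Duplicate detection and validation are still missing
 * (see the FIXME below).
 */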
int nvmap_pin_array(struct nvmap_client *client, struct nvmap_handle *gather,
                    const struct nvmap_pinarray_elem *arr, int nr,
                    struct nvmap_handle **unique)
{
        int i;
        int count = 0;

        /* FIXME: take care of duplicate ones & validation. */
        for (i = 0; i < nr; i++) {
                unique[i] = (struct nvmap_handle *)arr[i].pin_mem;
                nvmap_pin(client, (struct nvmap_handle_ref *)unique[i]);
                count++;
        }
        nvmap_reloc_pin_array((struct ion_client *)client,
                arr, nr, (struct ion_handle *)gather);
        return nr;
}

void nvmap_unpin_handles(struct nvmap_client *client,
                         struct nvmap_handle **h, int nr)
{
        int i;

        for (i = 0; i < nr; i++)
                nvmap_unpin(client, h[i]);
}

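/*
 * Write a single 32-bit value at the given offset into the buffer through a
 * temporary kernel mapping.
 */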
int nvmap_patch_word(struct nvmap_client *client,
                     struct nvmap_handle *patch,
                     u32 patch_offset, u32 patch_value)
{
        void *vaddr;
        u32 *patch_addr;

        vaddr = ion_map_kernel(client, patch);
        patch_addr = vaddr + patch_offset;
        __raw_writel(patch_value, patch_addr);
        wmb();
        ion_unmap_kernel(client, patch);
        return 0;
}

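/*
 * Look up a handle from the id passed in by user space (converted with
 * nvmap_convert_handle_u2k) and take a reference on it.
 */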
struct nvmap_handle *nvmap_handle_get(struct nvmap_handle *h);
struct nvmap_handle *nvmap_get_handle_id(struct nvmap_client *client,
                                         unsigned long id)
{
        struct ion_handle *handle;

        handle = (struct ion_handle *)nvmap_convert_handle_u2k(id);
        pr_debug("id=0x%x, h=0x%x,c=0x%x",
                (u32)id, (u32)handle, (u32)client);
        nvmap_handle_get(handle);
        return handle;
}

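/*
 * Duplicate a handle: create a new ion_handle that references the same
 * underlying buffer as the handle identified by @id and add it to the
 * client's handle list.
 */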
struct nvmap_handle_ref *nvmap_duplicate_handle_id(struct nvmap_client *client,
                                                   unsigned long id)
{
        struct ion_buffer *buffer;
        struct ion_handle *handle;
        struct ion_client *ion_client = client;

        handle = (struct ion_handle *)nvmap_convert_handle_u2k(id);
        pr_debug("id=0x%x, h=0x%x,c=0x%x",
                (u32)id, (u32)handle, (u32)client);
        buffer = handle->buffer;

        handle = ion_handle_create(client, buffer);

        mutex_lock(&ion_client->lock);
        ion_handle_add(ion_client, handle);
        mutex_unlock(&ion_client->lock);

        pr_debug("dup id=0x%x, h=0x%x", (u32)id, (u32)handle);
        return handle;
}

void _nvmap_handle_free(struct nvmap_handle *h)
{
        ion_handle_put(h);
}

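/*
 * IOVM allocation: allocate through ion, then hand the buffer to
 * ion_remap_dma() with the requested iova_start (intended to place the
 * buffer at that I/O virtual address).
 */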
struct nvmap_handle_ref *nvmap_alloc_iovm(struct nvmap_client *client,
        size_t size, size_t align, unsigned int flags, unsigned int iova_start)
{
        struct ion_handle *h;

        h = ion_alloc(client, size, align, 0xFF);
        ion_remap_dma(client, h, iova_start);
        return h;
}

void nvmap_free_iovm(struct nvmap_client *client, struct nvmap_handle_ref *r)
{
        ion_free(client, r);
}

struct nvmap_handle *nvmap_handle_get(struct nvmap_handle *h)
{
        ion_handle_get(h);
        return h;
}

void nvmap_handle_put(struct nvmap_handle *h)
{
        ion_handle_put(h);
}

#endif