gpu: ion: fill in buffer->{dev,size} before mapping new buffers
1 /*
2  * drivers/gpu/ion/ion.c
3  *
4  * Copyright (C) 2011 Google, Inc.
5  *
6  * This software is licensed under the terms of the GNU General Public
7  * License version 2, as published by the Free Software Foundation, and
8  * may be copied, distributed, and modified under those terms.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  */
16
17 #define pr_fmt(fmt)     "%s():%d: " fmt, __func__, __LINE__
18
19 #include <linux/device.h>
20 #include <linux/file.h>
21 #include <linux/fs.h>
22 #include <linux/anon_inodes.h>
23 #include <linux/ion.h>
24 #include <linux/list.h>
25 #include <linux/memblock.h>
26 #include <linux/miscdevice.h>
27 #include <linux/export.h>
28 #include <linux/mm.h>
29 #include <linux/mm_types.h>
30 #include <linux/rbtree.h>
31 #include <linux/sched.h>
32 #include <linux/slab.h>
33 #include <linux/seq_file.h>
34 #include <linux/uaccess.h>
35 #include <linux/debugfs.h>
36 #include <linux/dma-buf.h>
37
38 #include "ion_priv.h"
39 #define DEBUG
40
41 /**
42  * struct ion_device - the metadata of the ion device node
43  * @dev:                the actual misc device
44  * @buffers:    an rb tree of all the existing buffers
45  * @lock:               lock protecting the buffers & heaps trees
46  * @heaps:              an rb tree of all the heaps in the system, sorted by id
47  * @clients:            an rb tree of all the clients created against this device
48  */
49 struct ion_device {
50         struct miscdevice dev;
51         struct rb_root buffers;
52         struct mutex lock;
53         struct rb_root heaps;
54         long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
55                               unsigned long arg);
56         struct rb_root clients;
57         struct dentry *debug_root;
58 };
59
60 /**
61  * struct ion_client - a process/hw block local address space
62  * @node:               node in the tree of all clients
63  * @dev:                backpointer to ion device
64  * @handles:            an rb tree of all the handles in this client
65  * @lock:               lock protecting the tree of handles
66  * @heap_mask:          mask of heap types this client may allocate from
67  * @name:               used for debugging
68  * @task:               used for debugging
69  *
70  * A client represents a list of buffers this client may access.
71  * The mutex stored here is used to protect both the handles tree
72  * and the handles themselves, and should be held while modifying either.
73  */
74 struct ion_client {
75         struct rb_node node;
76         struct ion_device *dev;
77         struct rb_root handles;
78         struct mutex lock;
79         unsigned int heap_mask;
80         const char *name;
81         struct task_struct *task;
82         pid_t pid;
83         struct dentry *debug_root;
84 };
85
86 /**
87  * ion_handle - a client local reference to a buffer
88  * @ref:                reference count
89  * @client:             back pointer to the client the buffer resides in
90  * @buffer:             pointer to the buffer
91  * @node:               node in the client's handle rbtree
92  * @kmap_cnt:           count of times this client has mapped to kernel
93  *
94  * Modifications to node and kmap_cnt should be protected by the
95  * lock in the client.  Other fields are never changed after initialization.
97  */
98 struct ion_handle {
99         struct kref ref;
100         struct ion_client *client;
101         struct ion_buffer *buffer;
102         struct rb_node node;
103         unsigned int kmap_cnt;
104 };
105
106 /* this function should only be called while dev->lock is held */
107 static void ion_buffer_add(struct ion_device *dev,
108                            struct ion_buffer *buffer)
109 {
110         struct rb_node **p = &dev->buffers.rb_node;
111         struct rb_node *parent = NULL;
112         struct ion_buffer *entry;
113
114         while (*p) {
115                 parent = *p;
116                 entry = rb_entry(parent, struct ion_buffer, node);
117
118                 if (buffer < entry) {
119                         p = &(*p)->rb_left;
120                 } else if (buffer > entry) {
121                         p = &(*p)->rb_right;
122                 } else {
123                         pr_err("buffer already found.\n");
124                         BUG();
125                 }
126         }
127
128         rb_link_node(&buffer->node, parent, p);
129         rb_insert_color(&buffer->node, &dev->buffers);
130 }
131
132 /* this function should only be called while dev->lock is held */
133 static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
134                                      struct ion_device *dev,
135                                      unsigned long len,
136                                      unsigned long align,
137                                      unsigned long flags)
138 {
139         struct ion_buffer *buffer;
140         struct sg_table *table;
141         int ret;
142
143         buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
144         if (!buffer)
145                 return ERR_PTR(-ENOMEM);
146
147         buffer->heap = heap;
148         kref_init(&buffer->ref);
149
150         ret = heap->ops->allocate(heap, buffer, len, align, flags);
151         if (ret) {
152                 kfree(buffer);
153                 return ERR_PTR(ret);
154         }
155
156         buffer->dev = dev;
157         buffer->size = len;
158
159         table = buffer->heap->ops->map_dma(buffer->heap, buffer);
160         if (IS_ERR_OR_NULL(table)) {
161                 heap->ops->free(buffer);
162                 kfree(buffer);
163                 return ERR_PTR(PTR_ERR(table));
164         }
165         buffer->sg_table = table;
166
167         mutex_init(&buffer->lock);
168         ion_buffer_add(dev, buffer);
169         return buffer;
170 }
171
172 static void ion_buffer_destroy(struct kref *kref)
173 {
174         struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
175         struct ion_device *dev = buffer->dev;
176
177         if (WARN_ON(buffer->kmap_cnt > 0))
178                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
179
180         buffer->heap->ops->unmap_dma(buffer->heap, buffer);
181         buffer->heap->ops->free(buffer);
182         mutex_lock(&dev->lock);
183         rb_erase(&buffer->node, &dev->buffers);
184         mutex_unlock(&dev->lock);
185         kfree(buffer);
186 }
187
188 void ion_buffer_get(struct ion_buffer *buffer)
189 {
190         kref_get(&buffer->ref);
191 }
192
193 static int ion_buffer_put(struct ion_buffer *buffer)
194 {
195         return kref_put(&buffer->ref, ion_buffer_destroy);
196 }
197
198 struct ion_handle *ion_handle_create(struct ion_client *client,
199                                      struct ion_buffer *buffer)
200 {
201         struct ion_handle *handle;
202
203         handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
204         if (!handle)
205                 return ERR_PTR(-ENOMEM);
206         kref_init(&handle->ref);
207         rb_init_node(&handle->node);
208         handle->client = client;
209         ion_buffer_get(buffer);
210         handle->buffer = buffer;
211
212         return handle;
213 }
214
215 static void ion_handle_kmap_put(struct ion_handle *);
216
217 static void ion_handle_destroy(struct kref *kref)
218 {
219         struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
220         struct ion_client *client = handle->client;
221         struct ion_buffer *buffer = handle->buffer;
222
223         mutex_lock(&client->lock);
224
225         mutex_lock(&buffer->lock);
226         while (buffer->kmap_cnt)
227                 ion_handle_kmap_put(handle);
228         mutex_unlock(&buffer->lock);
229
230         if (!RB_EMPTY_NODE(&handle->node))
231                 rb_erase(&handle->node, &client->handles);
232         mutex_unlock(&client->lock);
233
234         ion_buffer_put(buffer);
235         kfree(handle);
236 }
237
238 struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
239 {
240         return handle->buffer;
241 }
242
243 void ion_handle_get(struct ion_handle *handle)
244 {
245         kref_get(&handle->ref);
246 }
247
248 int ion_handle_put(struct ion_handle *handle)
249 {
250         return kref_put(&handle->ref, ion_handle_destroy);
251 }
252
253 static struct ion_handle *ion_handle_lookup(struct ion_client *client,
254                                             struct ion_buffer *buffer)
255 {
256         struct rb_node *n;
257
258         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
259                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
260                                                      node);
261                 if (handle->buffer == buffer)
262                         return handle;
263         }
264         return NULL;
265 }
266
267 bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
268 {
269         struct rb_node *n = client->handles.rb_node;
270
271         while (n) {
272                 struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
273                                                           node);
274                 if (handle < handle_node)
275                         n = n->rb_left;
276                 else if (handle > handle_node)
277                         n = n->rb_right;
278                 else
279                         return true;
280         }
281         WARN(1, "invalid handle passed h=%p, comm=%s\n", handle,
282                 current->group_leader->comm);
283         return false;
284 }
285
286 void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
287 {
288         struct rb_node **p = &client->handles.rb_node;
289         struct rb_node *parent = NULL;
290         struct ion_handle *entry;
291
292         while (*p) {
293                 parent = *p;
294                 entry = rb_entry(parent, struct ion_handle, node);
295
296                 if (handle < entry)
297                         p = &(*p)->rb_left;
298                 else if (handle > entry)
299                         p = &(*p)->rb_right;
300                 else
301                         WARN(1, "%s: buffer already found.", __func__);
302         }
303
304         rb_link_node(&handle->node, parent, p);
305         rb_insert_color(&handle->node, &client->handles);
306 }
307
308 struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
309                              size_t align, unsigned int flags)
310 {
311         struct rb_node *n;
312         struct ion_handle *handle;
313         struct ion_device *dev = client->dev;
314         struct ion_buffer *buffer = NULL;
315
316         /*
317          * traverse the list of heaps available in this system in priority
318          * order.  If the heap type is supported by the client and matches the
319          * caller's request, allocate from it.  Repeat until an allocation has
320          * succeeded or all heaps have been tried.
321          */
322         if (WARN_ON(!len))
323                 return ERR_PTR(-EINVAL);
324
325         len = PAGE_ALIGN(len);
326
327         mutex_lock(&dev->lock);
328         for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
329                 struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
330                 /* if the client doesn't support this heap type */
331                 if (!((1 << heap->type) & client->heap_mask))
332                         continue;
333                 /* if the caller didn't specify this heap type */
334                 if (!((1 << heap->id) & flags))
335                         continue;
336                 buffer = ion_buffer_create(heap, dev, len, align, flags);
337                 if (!IS_ERR_OR_NULL(buffer))
338                         break;
339         }
340         mutex_unlock(&dev->lock);
341
342         if (buffer == NULL)
343                 return ERR_PTR(-ENODEV);
344
345         if (IS_ERR(buffer))
346                 return ERR_PTR(PTR_ERR(buffer));
347
348         handle = ion_handle_create(client, buffer);
349
350         /*
351          * ion_buffer_create will create a buffer with a ref_cnt of 1,
352          * and ion_handle_create will take a second reference, drop one here
353          */
354         ion_buffer_put(buffer);
355
356         if (!IS_ERR(handle)) {
357                 mutex_lock(&client->lock);
358                 ion_handle_add(client, handle);
359                 mutex_unlock(&client->lock);
360         }
361
362
363         return handle;
364 }
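
/*
 * A minimal usage sketch for the allocator above, as an in-kernel client
 * might call it: create a client, allocate, map into the kernel, then drop
 * everything.  ION_HEAP_SYSTEM_MASK and SZ_1M are assumptions taken from
 * the usual linux/ion.h and linux/sizes.h of this kernel generation, not
 * from this file; note that client->heap_mask filters heap *types* while
 * the flags argument selects heap *ids*, as the loop above shows.
 */
#if 0	/* usage sketch only, not compiled with this file */
static int example_ion_alloc(struct ion_device *idev)
{
	struct ion_client *client;
	struct ion_handle *handle;
	void *vaddr;

	client = ion_client_create(idev, ION_HEAP_SYSTEM_MASK, "example");
	if (IS_ERR(client))
		return PTR_ERR(client);

	/* 1 MB, page aligned, from any heap id enabled in the flags mask */
	handle = ion_alloc(client, SZ_1M, PAGE_SIZE, ION_HEAP_SYSTEM_MASK);
	if (IS_ERR(handle)) {
		ion_client_destroy(client);
		return PTR_ERR(handle);
	}

	/* map into the kernel, touch the buffer, drop the mapping again */
	vaddr = ion_map_kernel(client, handle);
	if (!IS_ERR_OR_NULL(vaddr)) {
		memset(vaddr, 0, SZ_1M);
		ion_unmap_kernel(client, handle);
	}

	ion_free(client, handle);
	ion_client_destroy(client);
	return 0;
}
#endif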
365
366 void ion_free(struct ion_client *client, struct ion_handle *handle)
367 {
368         bool valid_handle;
369
370         BUG_ON(client != handle->client);
371
372         mutex_lock(&client->lock);
373         valid_handle = ion_handle_validate(client, handle);
374         mutex_unlock(&client->lock);
375
376         if (!valid_handle) {
377                 WARN(1, "%s: invalid handle passed to free.\n", __func__);
378                 return;
379         }
380         ion_handle_put(handle);
381 }
382
383 int ion_phys(struct ion_client *client, struct ion_handle *handle,
384              ion_phys_addr_t *addr, size_t *len)
385 {
386         struct ion_buffer *buffer;
387         int ret;
388
389         mutex_lock(&client->lock);
390         if (!ion_handle_validate(client, handle)) {
391                 mutex_unlock(&client->lock);
392                 return -EINVAL;
393         }
394
395         buffer = handle->buffer;
396
397         if (!buffer->heap->ops->phys) {
398                 pr_err("ion_phys is not implemented by this heap.\n");
399                 mutex_unlock(&client->lock);
400                 return -ENODEV;
401         }
402         mutex_unlock(&client->lock);
403         ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
404         return ret;
405 }
406
407 static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
408 {
409         void *vaddr;
410
411         if (buffer->kmap_cnt) {
412                 buffer->kmap_cnt++;
413                 return buffer->vaddr;
414         }
415         vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
416         if (IS_ERR_OR_NULL(vaddr))
417                 return vaddr;
418         buffer->vaddr = vaddr;
419         buffer->kmap_cnt++;
420         return vaddr;
421 }
422
423 static void *ion_handle_kmap_get(struct ion_handle *handle)
424 {
425         struct ion_buffer *buffer = handle->buffer;
426         void *vaddr;
427
428         if (handle->kmap_cnt) {
429                 handle->kmap_cnt++;
430                 return buffer->vaddr;
431         }
432         vaddr = ion_buffer_kmap_get(buffer);
433         if (IS_ERR_OR_NULL(vaddr))
434                 return vaddr;
435         handle->kmap_cnt++;
436         return vaddr;
437 }
438
439 static void ion_buffer_kmap_put(struct ion_buffer *buffer)
440 {
441         buffer->kmap_cnt--;
442         if (!buffer->kmap_cnt) {
443                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
444                 buffer->vaddr = NULL;
445         }
446 }
447
448 static void ion_handle_kmap_put(struct ion_handle *handle)
449 {
450         struct ion_buffer *buffer = handle->buffer;
451
452         handle->kmap_cnt--;
453         if (!handle->kmap_cnt)
454                 ion_buffer_kmap_put(buffer);
455 }
456
457 void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
458 {
459         struct ion_buffer *buffer;
460         void *vaddr;
461
462         mutex_lock(&client->lock);
463         if (!ion_handle_validate(client, handle)) {
464                 pr_err("%s: invalid handle passed to map_kernel.\n",
465                        __func__);
466                 mutex_unlock(&client->lock);
467                 return ERR_PTR(-EINVAL);
468         }
469
470         buffer = handle->buffer;
471
472         if (!handle->buffer->heap->ops->map_kernel) {
473                 pr_err("map_kernel is not implemented by this heap.\n");
474                 mutex_unlock(&client->lock);
475                 return ERR_PTR(-ENODEV);
476         }
477
478         mutex_lock(&buffer->lock);
479         vaddr = ion_handle_kmap_get(handle);
480         mutex_unlock(&buffer->lock);
481         mutex_unlock(&client->lock);
482         return vaddr;
483 }
484
485 void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
486 {
487         struct ion_buffer *buffer;
488
489         mutex_lock(&client->lock);
490         buffer = handle->buffer;
491         mutex_lock(&buffer->lock);
492         ion_handle_kmap_put(handle);
493         mutex_unlock(&buffer->lock);
494         mutex_unlock(&client->lock);
495 }
496
497 struct scatterlist *iommu_heap_remap_dma(struct ion_heap *heap,
498                                               struct ion_buffer *buf,
499                                               unsigned long addr);
500 int ion_remap_dma(struct ion_client *client,
501                         struct ion_handle *handle,
502                         unsigned long addr)
503 {
504         struct ion_buffer *buffer;
505         int ret;
506
507         mutex_lock(&client->lock);
508         if (!ion_handle_validate(client, handle)) {
509                 pr_err("invalid handle passed to map_dma.\n");
510                 mutex_unlock(&client->lock);
511                 return -EINVAL;
512         }
513         buffer = handle->buffer;
514         mutex_lock(&buffer->lock);
515
516         ret = iommu_heap_remap_dma(buffer->heap, buffer, addr);
517
518         mutex_unlock(&buffer->lock);
519         mutex_unlock(&client->lock);
520         return ret;
521 }
522
523 static int ion_debug_client_show(struct seq_file *s, void *unused)
524 {
525         struct ion_client *client = s->private;
526         struct rb_node *n;
527         size_t sizes[ION_NUM_HEAPS] = {0};
528         const char *names[ION_NUM_HEAPS] = {0};
529         int i;
530
531         mutex_lock(&client->lock);
532         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
533                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
534                                                      node);
535                 enum ion_heap_type type = handle->buffer->heap->type;
536
537                 if (!names[type])
538                         names[type] = handle->buffer->heap->name;
539                 sizes[type] += handle->buffer->size;
540         }
541         mutex_unlock(&client->lock);
542
543         seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
544         for (i = 0; i < ION_NUM_HEAPS; i++) {
545                 if (!names[i])
546                         continue;
547                 seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
548         }
549         return 0;
550 }
551
552 static int ion_debug_client_open(struct inode *inode, struct file *file)
553 {
554         return single_open(file, ion_debug_client_show, inode->i_private);
555 }
556
557 static const struct file_operations debug_client_fops = {
558         .open = ion_debug_client_open,
559         .read = seq_read,
560         .llseek = seq_lseek,
561         .release = single_release,
562 };
563
564 struct ion_client *ion_client_create(struct ion_device *dev,
565                                      unsigned int heap_mask,
566                                      const char *name)
567 {
568         struct ion_client *client;
569         struct task_struct *task;
570         struct rb_node **p;
571         struct rb_node *parent = NULL;
572         struct ion_client *entry;
573         char debug_name[64];
574         pid_t pid;
575
576         get_task_struct(current->group_leader);
577         task_lock(current->group_leader);
578         pid = task_pid_nr(current->group_leader);
579         /* don't bother to store task struct for kernel threads,
580            they can't be killed anyway */
581         if (current->group_leader->flags & PF_KTHREAD) {
582                 put_task_struct(current->group_leader);
583                 task = NULL;
584         } else {
585                 task = current->group_leader;
586         }
587         task_unlock(current->group_leader);
588
589         client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
590         if (!client) {
591                 if (task)
592                         put_task_struct(current->group_leader);
593                 return ERR_PTR(-ENOMEM);
594         }
595
596         client->dev = dev;
597         client->handles = RB_ROOT;
598         mutex_init(&client->lock);
599         client->name = name;
600         client->heap_mask = heap_mask;
601         client->task = task;
602         client->pid = pid;
603
604         mutex_lock(&dev->lock);
605         p = &dev->clients.rb_node;
606         while (*p) {
607                 parent = *p;
608                 entry = rb_entry(parent, struct ion_client, node);
609
610                 if (client < entry)
611                         p = &(*p)->rb_left;
612                 else if (client > entry)
613                         p = &(*p)->rb_right;
614         }
615         rb_link_node(&client->node, parent, p);
616         rb_insert_color(&client->node, &dev->clients);
617
618         snprintf(debug_name, 64, "%u", client->pid);
619         client->debug_root = debugfs_create_file(debug_name, 0664,
620                                                  dev->debug_root, client,
621                                                  &debug_client_fops);
622         mutex_unlock(&dev->lock);
623
624         return client;
625 }
626
627 void ion_client_destroy(struct ion_client *client)
628 {
629         struct ion_device *dev = client->dev;
630         struct rb_node *n;
631
632         pr_debug("\n");
633         while ((n = rb_first(&client->handles))) {
634                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
635                                                      node);
636                 ion_handle_destroy(&handle->ref);
637         }
638         mutex_lock(&dev->lock);
639         if (client->task)
640                 put_task_struct(client->task);
641         rb_erase(&client->node, &dev->clients);
642         debugfs_remove_recursive(client->debug_root);
643         mutex_unlock(&dev->lock);
644
645         kfree(client);
646 }
647
648 struct sg_table *ion_sg_table(struct ion_client *client,
649                               struct ion_handle *handle)
650 {
651         struct ion_buffer *buffer;
652         struct sg_table *table;
653
654         mutex_lock(&client->lock);
655         if (!ion_handle_validate(client, handle)) {
656                 pr_err("%s: invalid handle passed to map_dma.\n",
657                        __func__);
658                 mutex_unlock(&client->lock);
659                 return ERR_PTR(-EINVAL);
660         }
661         buffer = handle->buffer;
662         table = buffer->sg_table;
663         mutex_unlock(&client->lock);
664         return table;
665 }
666
667 static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
668                                         enum dma_data_direction direction)
669 {
670         struct dma_buf *dmabuf = attachment->dmabuf;
671         struct ion_buffer *buffer = dmabuf->priv;
672
673         return buffer->sg_table;
674 }
675
676 static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
677                               struct sg_table *table,
678                               enum dma_data_direction direction)
679 {
680 }
681
682 static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
683 {
684         struct ion_buffer *buffer = dmabuf->priv;
685         int ret;
686
687         if (!buffer->heap->ops->map_user) {
688                 pr_err("%s: this heap does not define a method for mapping "
689                        "to userspace\n", __func__);
690                 return -EINVAL;
691         }
692
693         mutex_lock(&buffer->lock);
694         /* now map it to userspace */
695         ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
696         mutex_unlock(&buffer->lock);
697
698         if (ret)
699                 pr_err("%s: failure mapping buffer to userspace\n",
700                        __func__);
701
702         return ret;
703 }
704
705 static void ion_dma_buf_release(struct dma_buf *dmabuf)
706 {
707         struct ion_buffer *buffer = dmabuf->priv;
708         ion_buffer_put(buffer);
709 }
710
711 static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
712 {
713         struct ion_buffer *buffer = dmabuf->priv;
714         return buffer->vaddr + offset;
715 }
716
717 static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
718                                void *ptr)
719 {
720         return;
721 }
722
723 static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
724                                         size_t len,
725                                         enum dma_data_direction direction)
726 {
727         struct ion_buffer *buffer = dmabuf->priv;
728         void *vaddr;
729
730         if (!buffer->heap->ops->map_kernel) {
731                 pr_err("%s: map kernel is not implemented by this heap.\n",
732                        __func__);
733                 return -ENODEV;
734         }
735
736         mutex_lock(&buffer->lock);
737         vaddr = ion_buffer_kmap_get(buffer);
738         mutex_unlock(&buffer->lock);
739         if (IS_ERR(vaddr))
740                 return PTR_ERR(vaddr);
741         if (!vaddr)
742                 return -ENOMEM;
743         return 0;
744 }
745
746 static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
747                                        size_t len,
748                                        enum dma_data_direction direction)
749 {
750         struct ion_buffer *buffer = dmabuf->priv;
751
752         mutex_lock(&buffer->lock);
753         ion_buffer_kmap_put(buffer);
754         mutex_unlock(&buffer->lock);
755 }
756
757 struct dma_buf_ops dma_buf_ops = {
758         .map_dma_buf = ion_map_dma_buf,
759         .unmap_dma_buf = ion_unmap_dma_buf,
760         .mmap = ion_mmap,
761         .release = ion_dma_buf_release,
762         .begin_cpu_access = ion_dma_buf_begin_cpu_access,
763         .end_cpu_access = ion_dma_buf_end_cpu_access,
764         .kmap_atomic = ion_dma_buf_kmap,
765         .kunmap_atomic = ion_dma_buf_kunmap,
766         .kmap = ion_dma_buf_kmap,
767         .kunmap = ion_dma_buf_kunmap,
768 };
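
/*
 * The ops table above is what another driver exercises when it attaches to
 * an ion buffer through the generic dma-buf interface.  A hedged sketch of
 * such a consumer, using only core dma-buf calls of this kernel generation;
 * the fd is assumed to have come from ion_share_dma_buf() below.
 */
#if 0	/* consumer-side sketch only, not compiled with this file */
static int example_attach(struct device *dev, int fd)
{
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	dmabuf = dma_buf_get(fd);		/* takes a file reference */
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	attach = dma_buf_attach(dmabuf, dev);
	if (IS_ERR(attach)) {
		dma_buf_put(dmabuf);
		return PTR_ERR(attach);
	}

	/* ends up in ion_map_dma_buf(), which just returns buffer->sg_table */
	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		dma_buf_detach(dmabuf, attach);
		dma_buf_put(dmabuf);
		return PTR_ERR(sgt);
	}

	/* ... program the device with sgt here ... */

	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
	dma_buf_detach(dmabuf, attach);
	dma_buf_put(dmabuf);
	return 0;
}
#endif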
769
770 int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
771 {
772         struct ion_buffer *buffer;
773         struct dma_buf *dmabuf;
774         bool valid_handle;
775         int fd;
776
777         mutex_lock(&client->lock);
778         valid_handle = ion_handle_validate(client, handle);
779         mutex_unlock(&client->lock);
780         if (!valid_handle) {
781                 WARN(1, "%s: invalid handle passed to share.\n", __func__);
782                 return -EINVAL;
783         }
784
785         buffer = handle->buffer;
786         ion_buffer_get(buffer);
787         dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
788         if (IS_ERR(dmabuf)) {
789                 ion_buffer_put(buffer);
790                 return PTR_ERR(dmabuf);
791         }
792         fd = dma_buf_fd(dmabuf, O_CLOEXEC);
793         if (fd < 0) {
794                 dma_buf_put(dmabuf);
795                 ion_buffer_put(buffer);
796         }
797         return fd;
798 }
799
800 struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
801 {
802         struct dma_buf *dmabuf;
803         struct ion_buffer *buffer;
804         struct ion_handle *handle;
805
806         dmabuf = dma_buf_get(fd);
807         if (IS_ERR_OR_NULL(dmabuf))
808                 return ERR_PTR(PTR_ERR(dmabuf));
809         /* if this memory came from ion */
810
811         if (dmabuf->ops != &dma_buf_ops) {
812                 pr_err("%s: can not import dmabuf from another exporter\n",
813                        __func__);
814                 dma_buf_put(dmabuf);
815                 return ERR_PTR(-EINVAL);
816         }
817         buffer = dmabuf->priv;
818
819         mutex_lock(&client->lock);
820         /* if a handle exists for this buffer just take a reference to it */
821         handle = ion_handle_lookup(client, buffer);
822         if (!IS_ERR_OR_NULL(handle)) {
823                 ion_handle_get(handle);
824                 goto end;
825         }
826         handle = ion_handle_create(client, buffer);
827         if (IS_ERR_OR_NULL(handle))
828                 goto end;
829         ion_handle_add(client, handle);
830 end:
831         mutex_unlock(&client->lock);
832         dma_buf_put(dmabuf);
833         return handle;
834 }
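
/*
 * Import-side sketch: a driver that receives a dma-buf fd from userspace,
 * for example through its own ioctl, can turn it back into a local handle
 * with ion_import_dma_buf(), provided the fd was originally exported by
 * ion.  The function name and flow below are only an illustration of the
 * call above, not code from this tree.
 */
#if 0	/* import-side sketch only, not compiled with this file */
static struct ion_handle *example_import(struct ion_client *client, int fd)
{
	struct ion_handle *handle;

	/* fails with -EINVAL if the fd was exported by someone other than ion */
	handle = ion_import_dma_buf(client, fd);
	if (IS_ERR_OR_NULL(handle))
		return handle;

	/* the backing pages are now reachable through ion_sg_table() */
	return handle;
}
#endif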
835
836 static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
837 {
838         struct ion_client *client = filp->private_data;
839
840         switch (cmd) {
841         case ION_IOC_ALLOC:
842         {
843                 struct ion_allocation_data data;
844
845                 if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
846                         return -EFAULT;
847                 data.handle = ion_alloc(client, data.len, data.align,
848                                              data.flags);
849
850                 if (IS_ERR(data.handle))
851                         return PTR_ERR(data.handle);
852
853                 if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
854                         ion_free(client, data.handle);
855                         return -EFAULT;
856                 }
857                 break;
858         }
859         case ION_IOC_FREE:
860         {
861                 struct ion_handle_data data;
862                 bool valid;
863
864                 if (copy_from_user(&data, (void __user *)arg,
865                                    sizeof(struct ion_handle_data)))
866                         return -EFAULT;
867                 mutex_lock(&client->lock);
868                 valid = ion_handle_validate(client, data.handle);
869                 mutex_unlock(&client->lock);
870                 if (!valid)
871                         return -EINVAL;
872                 ion_free(client, data.handle);
873                 break;
874         }
875         case ION_IOC_SHARE:
876         {
877                 struct ion_fd_data data;
878
879                 if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
880                         return -EFAULT;
881                 data.fd = ion_share_dma_buf(client, data.handle);
882                 if (copy_to_user((void __user *)arg, &data, sizeof(data)))
883                         return -EFAULT;
884                 break;
885         }
886         case ION_IOC_IMPORT:
887         {
888                 struct ion_fd_data data;
889                 if (copy_from_user(&data, (void __user *)arg,
890                                    sizeof(struct ion_fd_data)))
891                         return -EFAULT;
892                 data.handle = ion_import_dma_buf(client, data.fd);
893                 if (IS_ERR(data.handle))
894                         data.handle = NULL;
895                 if (copy_to_user((void __user *)arg, &data,
896                                  sizeof(struct ion_fd_data)))
897                         return -EFAULT;
898                 break;
899         }
900         case ION_IOC_CUSTOM:
901         {
902                 struct ion_device *dev = client->dev;
903                 struct ion_custom_data data;
904
905                 if (!dev->custom_ioctl)
906                         return -ENOTTY;
907                 if (copy_from_user(&data, (void __user *)arg,
908                                 sizeof(struct ion_custom_data)))
909                         return -EFAULT;
910                 return dev->custom_ioctl(client, data.cmd, data.arg);
911         }
912         default:
913                 return -ENOTTY;
914         }
915         return 0;
916 }
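
/*
 * From userspace the same paths are reached through the ioctls above on
 * /dev/ion.  A hedged sketch of the typical allocate-and-share sequence,
 * assuming the ION_IOC_* numbers and struct layouts from this tree's
 * linux/ion.h; the handle member is an opaque cookie that userspace only
 * ever passes back to the kernel.
 */
#if 0	/* userspace sketch, not part of this file */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/ion.h>

static int example_user_alloc(size_t len)
{
	struct ion_allocation_data alloc = {
		.len = len, .align = 4096, .flags = ION_HEAP_SYSTEM_MASK,
	};
	struct ion_fd_data share;
	struct ion_handle_data free_data;
	void *p;
	int ion_fd;

	ion_fd = open("/dev/ion", O_RDWR);
	if (ion_fd < 0)
		return -1;

	if (ioctl(ion_fd, ION_IOC_ALLOC, &alloc) < 0)	/* ion_alloc() */
		goto out;

	share.handle = alloc.handle;
	if (ioctl(ion_fd, ION_IOC_SHARE, &share) < 0)	/* dma-buf fd */
		goto out_free;

	/* the dma-buf fd can be mmap()ed or passed to another process */
	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, share.fd, 0);
	if (p != MAP_FAILED)
		munmap(p, len);
	close(share.fd);

out_free:
	free_data.handle = alloc.handle;
	ioctl(ion_fd, ION_IOC_FREE, &free_data);	/* ion_free() */
out:
	close(ion_fd);
	return 0;
}
#endif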
917
918 static int ion_release(struct inode *inode, struct file *file)
919 {
920         struct ion_client *client = file->private_data;
921
922         pr_debug("%s: %d\n", __func__, __LINE__);
923         ion_client_destroy(client);
924         return 0;
925 }
926
927 static int ion_open(struct inode *inode, struct file *file)
928 {
929         struct miscdevice *miscdev = file->private_data;
930         struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
931         struct ion_client *client;
932
933         pr_debug("\n");
934         client = ion_client_create(dev, -1, "user");
935         if (IS_ERR_OR_NULL(client))
936                 return PTR_ERR(client);
937         file->private_data = client;
938
939         return 0;
940 }
941
942 static const struct file_operations ion_fops = {
943         .owner          = THIS_MODULE,
944         .open           = ion_open,
945         .release        = ion_release,
946         .unlocked_ioctl = ion_ioctl,
947 };
948
949 static size_t ion_debug_heap_total(struct ion_client *client,
950                                    enum ion_heap_type type)
951 {
952         size_t size = 0;
953         struct rb_node *n;
954
955         mutex_lock(&client->lock);
956         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
957                 struct ion_handle *handle = rb_entry(n,
958                                                      struct ion_handle,
959                                                      node);
960                 if (handle->buffer->heap->type == type)
961                         size += handle->buffer->size;
962         }
963         mutex_unlock(&client->lock);
964         return size;
965 }
966
967 static int ion_debug_heap_show(struct seq_file *s, void *unused)
968 {
969         struct ion_heap *heap = s->private;
970         struct ion_device *dev = heap->dev;
971         struct rb_node *n;
972
973         seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
974
975         for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
976                 struct ion_client *client = rb_entry(n, struct ion_client,
977                                                      node);
978                 size_t size = ion_debug_heap_total(client, heap->type);
979                 if (!size)
980                         continue;
981                 if (client->task) {
982                         char task_comm[TASK_COMM_LEN];
983
984                         get_task_comm(task_comm, client->task);
985                         seq_printf(s, "%16s %16u %16zu\n", task_comm,
986                                    client->pid, size);
987                 } else {
988                         seq_printf(s, "%16s %16u %16zu\n", client->name,
989                                    client->pid, size);
990                 }
991         }
992         return 0;
993 }
994
995 static int ion_debug_heap_open(struct inode *inode, struct file *file)
996 {
997         return single_open(file, ion_debug_heap_show, inode->i_private);
998 }
999
1000 static const struct file_operations debug_heap_fops = {
1001         .open = ion_debug_heap_open,
1002         .read = seq_read,
1003         .llseek = seq_lseek,
1004         .release = single_release,
1005 };
1006
1007 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1008 {
1009         struct rb_node **p = &dev->heaps.rb_node;
1010         struct rb_node *parent = NULL;
1011         struct ion_heap *entry;
1012
1013         if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1014             !heap->ops->unmap_dma)
1015                 pr_err("%s: can not add heap with invalid ops struct.\n",
1016                        __func__);
1017
1018         heap->dev = dev;
1019         mutex_lock(&dev->lock);
1020         while (*p) {
1021                 parent = *p;
1022                 entry = rb_entry(parent, struct ion_heap, node);
1023
1024                 if (heap->id < entry->id) {
1025                         p = &(*p)->rb_left;
1026                 } else if (heap->id > entry->id) {
1027                         p = &(*p)->rb_right;
1028                 } else {
1029                         pr_err("can not insert multiple heaps with "
1030                                 "id %d\n", heap->id);
1031                         goto end;
1032                 }
1033         }
1034
1035         rb_link_node(&heap->node, parent, p);
1036         rb_insert_color(&heap->node, &dev->heaps);
1037         debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
1038                             &debug_heap_fops);
1039 end:
1040         mutex_unlock(&dev->lock);
1041 }
1042
1043 struct ion_device *ion_device_create(long (*custom_ioctl)
1044                                      (struct ion_client *client,
1045                                       unsigned int cmd,
1046                                       unsigned long arg))
1047 {
1048         struct ion_device *idev;
1049         int ret;
1050
1051         idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
1052         if (!idev)
1053                 return ERR_PTR(-ENOMEM);
1054
1055         idev->dev.minor = MISC_DYNAMIC_MINOR;
1056         idev->dev.name = "ion";
1057         idev->dev.fops = &ion_fops;
1058         idev->dev.parent = NULL;
1059         ret = misc_register(&idev->dev);
1060         if (ret) {
1061                 pr_err("ion: failed to register misc device.\n");
1062                 return ERR_PTR(ret);
1063         }
1064
1065         idev->debug_root = debugfs_create_dir("ion", NULL);
1066         if (IS_ERR_OR_NULL(idev->debug_root))
1067                 pr_err("ion: failed to create debug files.\n");
1068
1069         idev->custom_ioctl = custom_ioctl;
1070         idev->buffers = RB_ROOT;
1071         mutex_init(&idev->lock);
1072         idev->heaps = RB_ROOT;
1073         idev->clients = RB_ROOT;
1074         return idev;
1075 }
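
/*
 * Bringing the device up from a platform driver usually amounts to creating
 * the ion device and registering one heap per entry in the board's platform
 * data.  ion_heap_create() and the ion_platform_data layout are assumptions
 * based on ion_priv.h / linux/ion.h in this tree; platform_device support
 * would need its own include.  A sketch only, not a reference probe routine.
 */
#if 0	/* platform probe sketch, not compiled with this file */
static struct ion_device *idev;

static int example_ion_probe(struct platform_device *pdev)
{
	struct ion_platform_data *pdata = pdev->dev.platform_data;
	int i;

	idev = ion_device_create(NULL);		/* no custom ioctl */
	if (IS_ERR(idev))
		return PTR_ERR(idev);

	for (i = 0; i < pdata->nr; i++) {
		struct ion_heap *heap = ion_heap_create(&pdata->heaps[i]);

		if (IS_ERR_OR_NULL(heap))
			continue;
		ion_device_add_heap(idev, heap);
	}
	return 0;
}
#endif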
1076
1077 void ion_device_destroy(struct ion_device *dev)
1078 {
1079         misc_deregister(&dev->dev);
1080         /* XXX need to free the heaps and clients ? */
1081         kfree(dev);
1082 }
1083
1084 struct ion_client *ion_client_get_file(int fd)
1085 {
1086         struct ion_client *client = ERR_PTR(-EFAULT);
1087         struct file *f = fget(fd);
1088         if (!f)
1089                 return ERR_PTR(-EINVAL);
1090
1091         if (f->f_op == &ion_fops) {
1092                 client = f->private_data;
1093                 ion_client_get(client);
1094         }
1095
1096         fput(f);
1097         return client;
1098 }
1099
1100 void __init ion_reserve(struct ion_platform_data *data)
1101 {
1102         int i, ret;
1103
1104         for (i = 0; i < data->nr; i++) {
1105                 if (data->heaps[i].size == 0)
1106                         continue;
1107                 ret = memblock_reserve(data->heaps[i].base,
1108                                        data->heaps[i].size);
1109                 if (ret)
1110                         pr_err("memblock reserve of %zx@%lx failed\n",
1111                                data->heaps[i].size,
1112                                data->heaps[i].base);
1113         }
1114 }
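
/*
 * ion_reserve() is meant to run from the machine's early reserve hook,
 * before the page allocator claims the carveout ranges.  A board-file
 * sketch with made-up addresses; it assumes the variant of
 * struct ion_platform_data whose heaps member is a pointer (some versions
 * of linux/ion.h declare it as a trailing array instead), plus SZ_32M from
 * linux/sizes.h.
 */
#if 0	/* board-file sketch, not compiled with this file */
static struct ion_platform_heap example_heaps[] = {
	{
		.type = ION_HEAP_TYPE_CARVEOUT,
		.id   = ION_HEAP_TYPE_CARVEOUT,
		.name = "carveout",
		.base = 0x90000000,	/* made-up physical address */
		.size = SZ_32M,
	},
};

static struct ion_platform_data example_ion_pdata = {
	.nr    = ARRAY_SIZE(example_heaps),
	.heaps = example_heaps,
};

/* called from the machine descriptor's .reserve callback */
static void __init example_board_reserve(void)
{
	ion_reserve(&example_ion_pdata);
}
#endif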