[ARM/tegra] Add Tegra3 support
arch/arm/mach-tegra/iovmm.c
/*
 * arch/arm/mach-tegra/iovmm.c
 *
 * Tegra I/O VM manager
 *
 * Copyright (c) 2010, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>

#include <mach/iovmm.h>

/*
 * After the best-fit block is located, the remaining pages not needed for
 * the allocation will be split into a new free block if the number of
 * remaining pages is >= MIN_SPLIT_PAGE.
 */
#define MIN_SPLIT_PAGE (4)
#define MIN_SPLIT_BYTES(_d) (MIN_SPLIT_PAGE<<(_d)->dev->pgsize_bits)

#define iovmm_start(_b) ((_b)->vm_area.iovm_start)
#define iovmm_length(_b) ((_b)->vm_area.iovm_length)
#define iovmm_end(_b) (iovmm_start(_b) + iovmm_length(_b))

/* flags for the block */
#define BK_free         0 /* indicates free mappings */
#define BK_map_dirty    1 /* used by demand-loaded mappings */

/* flags for the client */
#define CL_locked       0

/* flags for the domain */
#define DM_map_dirty    0

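/*
 * A tegra_iovmm_block describes one contiguous range of I/O virtual
 * addresses within a domain.  Every block, free or allocated, sits in the
 * domain's all_blocks tree (sorted by start address); free blocks are also
 * linked into the free_blocks tree (sorted by length) so that allocation
 * can do a best-fit search.  The embedded vm_area is the view handed out
 * to clients; ref counts outstanding references, and poison is set when
 * the block is freed to catch use-after-free.
 */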
struct tegra_iovmm_block {
        struct tegra_iovmm_area vm_area;
        atomic_t                ref;
        unsigned long           flags;
        unsigned long           poison;
        struct rb_node          free_node;
        struct rb_node          all_node;
};

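/*
 * Clients created with the same share_group name are attached to a single
 * iovmm_share_group and therefore share one tegra_iovmm_domain (one I/O
 * virtual address space).  Clients created with a NULL share_group each
 * get an anonymous group of their own.
 */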
struct iovmm_share_group {
        const char                      *name;
        struct tegra_iovmm_domain       *domain;
        struct list_head                client_list;
        struct list_head                group_list;
        spinlock_t                      lock;
};

static LIST_HEAD(iovmm_devices);
static LIST_HEAD(iovmm_groups);
static DEFINE_MUTEX(iovmm_group_list_lock);
static DEFINE_SPINLOCK(iovmm_device_list_lock);
static struct kmem_cache *iovmm_cache;

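/* round an address up/down to the device's I/O VM page size */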
static tegra_iovmm_addr_t iovmm_align_up(struct tegra_iovmm_device *dev,
        tegra_iovmm_addr_t addr)
{
        addr += (1<<dev->pgsize_bits);
        addr--;
        addr &= ~((1<<dev->pgsize_bits)-1);
        return addr;
}

static tegra_iovmm_addr_t iovmm_align_down(struct tegra_iovmm_device *dev,
        tegra_iovmm_addr_t addr)
{
        addr &= ~((1<<dev->pgsize_bits)-1);
        return addr;
}

#define iovmprint(fmt, arg...) snprintf(page+len, count-len, fmt, ## arg)

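/*
 * Walk all blocks in a domain and accumulate the statistics reported
 * through /proc/iovmminfo: block counts, total size, total free size and
 * the largest single free block.
 */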
static void tegra_iovmm_block_stats(struct tegra_iovmm_domain *domain,
        unsigned int *num_blocks, unsigned int *num_free,
        tegra_iovmm_addr_t *total, tegra_iovmm_addr_t *total_free,
        tegra_iovmm_addr_t *max_free)
{
        struct rb_node *n;
        struct tegra_iovmm_block *b;

        *num_blocks = 0;
        *num_free = 0;
        *total = (tegra_iovmm_addr_t)0;
        *total_free = (tegra_iovmm_addr_t)0;
        *max_free = (tegra_iovmm_addr_t)0;

        spin_lock(&domain->block_lock);
        n = rb_first(&domain->all_blocks);
        while (n) {
                b = rb_entry(n, struct tegra_iovmm_block, all_node);
                n = rb_next(n);
                (*num_blocks)++;
                (*total) += iovmm_length(b);
                if (test_bit(BK_free, &b->flags)) {
                        (*num_free)++;
                        (*total_free) += iovmm_length(b);
                        (*max_free) = max_t(tegra_iovmm_addr_t,
                                (*max_free), iovmm_length(b));
                }
        }
        spin_unlock(&domain->block_lock);
}

static int tegra_iovmm_read_proc(char *page, char **start, off_t off,
        int count, int *eof, void *data)
{
        struct iovmm_share_group *grp;
        tegra_iovmm_addr_t max_free, total_free, total;
        unsigned int num, num_free;

        int len = 0;

        mutex_lock(&iovmm_group_list_lock);
        len += iovmprint("\ngroups\n");
        if (list_empty(&iovmm_groups))
                len += iovmprint("\t<empty>\n");
        else {
                list_for_each_entry(grp, &iovmm_groups, group_list) {
                        len += iovmprint("\t%s (device: %s)\n",
                                (grp->name) ? grp->name : "<unnamed>",
                                grp->domain->dev->name);
                        tegra_iovmm_block_stats(grp->domain, &num,
                                &num_free, &total, &total_free, &max_free);
                        total >>= 10;
                        total_free >>= 10;
                        max_free >>= 10;
                        len += iovmprint("\t\tsize: %uKiB free: %uKiB "
                                "largest: %uKiB (%u free / %u total blocks)\n",
                                total, total_free, max_free, num_free, num);
                }
        }
        mutex_unlock(&iovmm_group_list_lock);

        *eof = 1;
        return len;
}

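/*
 * Drop one reference to a block; the block is returned to the kmem cache
 * when the last reference goes away.  The poison value traps any use of
 * the block after it has been freed.
 */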
static void iovmm_block_put(struct tegra_iovmm_block *b)
{
        BUG_ON(b->poison);
        BUG_ON(atomic_read(&b->ref)==0);
        if (!atomic_dec_return(&b->ref)) {
                b->poison = 0xa5a5a5a5;
                kmem_cache_free(iovmm_cache, b);
        }
}

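/*
 * Return a block to the domain's free list, merging it with its
 * address-order predecessor and/or successor when either of them is also
 * free, and (re)insert the result into the length-sorted free_blocks tree.
 */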
static void iovmm_free_block(struct tegra_iovmm_domain *domain,
        struct tegra_iovmm_block *block)
{
        struct tegra_iovmm_block *pred = NULL; /* address-order predecessor */
        struct tegra_iovmm_block *succ = NULL; /* address-order successor */
        struct rb_node **p;
        struct rb_node *parent = NULL, *temp;
        int pred_free = 0, succ_free = 0;

        iovmm_block_put(block);

        spin_lock(&domain->block_lock);
        temp = rb_prev(&block->all_node);
        if (temp)
                pred = rb_entry(temp, struct tegra_iovmm_block, all_node);
        temp = rb_next(&block->all_node);
        if (temp)
                succ = rb_entry(temp, struct tegra_iovmm_block, all_node);

        if (pred)
                pred_free = test_bit(BK_free, &pred->flags);
        if (succ)
                succ_free = test_bit(BK_free, &succ->flags);

        if (pred_free && succ_free) {
                iovmm_length(pred) += iovmm_length(block);
                iovmm_length(pred) += iovmm_length(succ);
                rb_erase(&block->all_node, &domain->all_blocks);
                rb_erase(&succ->all_node, &domain->all_blocks);
                rb_erase(&succ->free_node, &domain->free_blocks);
                rb_erase(&pred->free_node, &domain->free_blocks);
                iovmm_block_put(block);
                iovmm_block_put(succ);
                block = pred;
        } else if (pred_free) {
                iovmm_length(pred) += iovmm_length(block);
                rb_erase(&block->all_node, &domain->all_blocks);
                rb_erase(&pred->free_node, &domain->free_blocks);
                iovmm_block_put(block);
                block = pred;
        } else if (succ_free) {
                iovmm_length(block) += iovmm_length(succ);
                rb_erase(&succ->all_node, &domain->all_blocks);
                rb_erase(&succ->free_node, &domain->free_blocks);
                iovmm_block_put(succ);
        }

        p = &domain->free_blocks.rb_node;
        while (*p) {
                struct tegra_iovmm_block *b;
                parent = *p;
                b = rb_entry(parent, struct tegra_iovmm_block, free_node);
                if (iovmm_length(block) >= iovmm_length(b))
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&block->free_node, parent, p);
        rb_insert_color(&block->free_node, &domain->free_blocks);
        set_bit(BK_free, &block->flags);
        spin_unlock(&domain->block_lock);
}

/*
 * If the best-fit block is larger than the requested size, a remainder
 * block is created and inserted into the free list in its place.  Since
 * all free blocks are stored in two trees, the new block must be linked
 * into both.
 *
 * Called with the domain's block_lock dropped (the remainder block is
 * allocated with GFP_KERNEL, which may sleep); returns with the lock held
 * again so that the caller can finish the allocation and release it.
 */
static void iovmm_split_free_block(struct tegra_iovmm_domain *domain,
        struct tegra_iovmm_block *block, unsigned long size)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct tegra_iovmm_block *rem;
        struct tegra_iovmm_block *b;

        rem = kmem_cache_zalloc(iovmm_cache, GFP_KERNEL);
        if (!rem) {
                /* preserve the locking convention even when the split is
                 * abandoned: the caller expects block_lock to be held */
                spin_lock(&domain->block_lock);
                return;
        }

        spin_lock(&domain->block_lock);
        p = &domain->free_blocks.rb_node;

        iovmm_start(rem) = iovmm_start(block) + size;
        iovmm_length(rem) = iovmm_length(block) - size;
        atomic_set(&rem->ref, 1);
        iovmm_length(block) = size;

        while (*p) {
                parent = *p;
                b = rb_entry(parent, struct tegra_iovmm_block, free_node);
                if (iovmm_length(rem) >= iovmm_length(b))
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        set_bit(BK_free, &rem->flags);
        rb_link_node(&rem->free_node, parent, p);
        rb_insert_color(&rem->free_node, &domain->free_blocks);

        p = &domain->all_blocks.rb_node;
        parent = NULL;
        while (*p) {
                parent = *p;
                b = rb_entry(parent, struct tegra_iovmm_block, all_node);
                if (iovmm_start(rem) >= iovmm_start(b))
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&rem->all_node, parent, p);
        rb_insert_color(&rem->all_node, &domain->all_blocks);
}

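/*
 * Best-fit allocator for a domain.  Walks the length-sorted free_blocks
 * tree for the smallest free block that satisfies the page-aligned
 * request, takes a reference on it, and splits off the unused tail when
 * it is at least MIN_SPLIT_BYTES long.  The static 'splitting' flag keeps
 * other allocators out of the trees while a split (which has to drop
 * block_lock to allocate the remainder block) is in progress.
 */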
static struct tegra_iovmm_block *iovmm_alloc_block(
        struct tegra_iovmm_domain *domain, unsigned long size)
{
        struct rb_node *n;
        struct tegra_iovmm_block *b, *best;
        static int splitting = 0;

        BUG_ON(!size);
        size = iovmm_align_up(domain->dev, size);
        for (;;) {
                spin_lock(&domain->block_lock);
                if (!splitting)
                        break;
                spin_unlock(&domain->block_lock);
                schedule();
        }
        n = domain->free_blocks.rb_node;
        best = NULL;
        while (n) {
                b = rb_entry(n, struct tegra_iovmm_block, free_node);
                if (iovmm_length(b) < size) {
                        n = n->rb_right;
                } else if (iovmm_length(b) == size) {
                        best = b;
                        break;
                } else {
                        best = b;
                        n = n->rb_left;
                }
        }
        if (!best) {
                spin_unlock(&domain->block_lock);
                return NULL;
        }
        rb_erase(&best->free_node, &domain->free_blocks);
        clear_bit(BK_free, &best->flags);
        atomic_inc(&best->ref);
        if (iovmm_length(best) >= size+MIN_SPLIT_BYTES(domain)) {
                splitting = 1;
                spin_unlock(&domain->block_lock);
                iovmm_split_free_block(domain, best, size);
                splitting = 0;
        }

        spin_unlock(&domain->block_lock);

        return best;
}

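/*
 * Initialize a domain covering [start, end) on the given device and seed
 * its trees with a single free block spanning the whole page-aligned
 * range.
 */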
int tegra_iovmm_domain_init(struct tegra_iovmm_domain *domain,
        struct tegra_iovmm_device *dev, tegra_iovmm_addr_t start,
        tegra_iovmm_addr_t end)
{
        struct tegra_iovmm_block *b;

        b = kmem_cache_zalloc(iovmm_cache, GFP_KERNEL);
        if (!b) return -ENOMEM;

        domain->dev = dev;
        atomic_set(&domain->clients, 0);
        atomic_set(&domain->locks, 0);
        atomic_set(&b->ref, 1);
        spin_lock_init(&domain->block_lock);
        init_rwsem(&domain->map_lock);
        init_waitqueue_head(&domain->delay_lock);
        iovmm_start(b) = iovmm_align_up(dev, start);
        iovmm_length(b) = iovmm_align_down(dev, end) - iovmm_start(b);
        set_bit(BK_free, &b->flags);
        rb_link_node(&b->free_node, NULL, &domain->free_blocks.rb_node);
        rb_insert_color(&b->free_node, &domain->free_blocks);
        rb_link_node(&b->all_node, NULL, &domain->all_blocks.rb_node);
        rb_insert_color(&b->all_node, &domain->all_blocks);
        return 0;
}

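/*
 * Allocate an I/O virtual address range of at least 'size' bytes for the
 * client.  Demand-loaded areas (ops != NULL) are normally marked dirty and
 * mapped lazily the next time the client locks its domain; if the client
 * is already locked, the area is mapped immediately.
 */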
struct tegra_iovmm_area *tegra_iovmm_create_vm(
        struct tegra_iovmm_client *client, struct tegra_iovmm_area_ops *ops,
        unsigned long size, pgprot_t pgprot)
{
        struct tegra_iovmm_block *b;
        struct tegra_iovmm_domain *domain;

        if (!client) return NULL;

        domain = client->domain;

        b = iovmm_alloc_block(domain, size);
        if (!b) return NULL;

        b->vm_area.domain = domain;
        b->vm_area.pgprot = pgprot;
        b->vm_area.ops = ops;

        down_read(&b->vm_area.domain->map_lock);
        if (ops && !test_bit(CL_locked, &client->flags)) {
                set_bit(BK_map_dirty, &b->flags);
                set_bit(DM_map_dirty, &client->domain->flags);
        } else if (ops) {
                if (domain->dev->ops->map(domain, &b->vm_area))
                        pr_err("%s failed to map locked domain\n", __func__);
        }
        up_read(&b->vm_area.domain->map_lock);

        return &b->vm_area;
}

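/*
 * Install a single page (by pfn) at vaddr inside an area that is managed
 * page-by-page (one created with ops == NULL).
 */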
void tegra_iovmm_vm_insert_pfn(struct tegra_iovmm_area *vm,
        tegra_iovmm_addr_t vaddr, unsigned long pfn)
{
        struct tegra_iovmm_domain *domain = vm->domain;
        BUG_ON(vaddr & ((1<<domain->dev->pgsize_bits)-1));
        BUG_ON(vaddr >= vm->iovm_start + vm->iovm_length);
        BUG_ON(vaddr < vm->iovm_start);
        BUG_ON(vm->ops);

        domain->dev->ops->map_pfn(domain, vm, vaddr, pfn);
}

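/*
 * tegra_iovmm_zap_vm() tears down the translations for an area without
 * giving up its I/O virtual address range; tegra_iovmm_unzap_vm() restores
 * them, either immediately (if the domain is locked) or lazily at the next
 * client lock.
 */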
void tegra_iovmm_zap_vm(struct tegra_iovmm_area *vm)
{
        struct tegra_iovmm_block *b;
        struct tegra_iovmm_domain *domain;

        b = container_of(vm, struct tegra_iovmm_block, vm_area);
        domain = vm->domain;
        /* if the vm area mapping was deferred, don't unmap it since
         * the memory for the page tables it uses may not be allocated */
        down_read(&domain->map_lock);
        if (!test_and_clear_bit(BK_map_dirty, &b->flags))
                domain->dev->ops->unmap(domain, vm, false);
        up_read(&domain->map_lock);
}

void tegra_iovmm_unzap_vm(struct tegra_iovmm_area *vm)
{
        struct tegra_iovmm_block *b;
        struct tegra_iovmm_domain *domain;

        b = container_of(vm, struct tegra_iovmm_block, vm_area);
        domain = vm->domain;
        if (!vm->ops) return;

        down_read(&domain->map_lock);
        if (vm->ops) {
                if (atomic_read(&domain->locks))
                        domain->dev->ops->map(domain, vm);
                else {
                        set_bit(BK_map_dirty, &b->flags);
                        set_bit(DM_map_dirty, &domain->flags);
                }
        }
        up_read(&domain->map_lock);
}

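/*
 * Tear down any remaining mapping for an area and return its I/O virtual
 * address range to the domain's free list.
 */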
void tegra_iovmm_free_vm(struct tegra_iovmm_area *vm)
{
        struct tegra_iovmm_block *b;
        struct tegra_iovmm_domain *domain;

        if (!vm) return;

        b = container_of(vm, struct tegra_iovmm_block, vm_area);
        domain = vm->domain;
        down_read(&domain->map_lock);
        if (!test_and_clear_bit(BK_map_dirty, &b->flags))
                domain->dev->ops->unmap(domain, vm, true);
        iovmm_free_block(domain, b);
        up_read(&domain->map_lock);
}

struct tegra_iovmm_area *tegra_iovmm_area_get(struct tegra_iovmm_area *vm)
{
        struct tegra_iovmm_block *b;

        BUG_ON(!vm);
        b = container_of(vm, struct tegra_iovmm_block, vm_area);

        atomic_inc(&b->ref);
        return &b->vm_area;
}

void tegra_iovmm_area_put(struct tegra_iovmm_area *vm)
{
        struct tegra_iovmm_block *b;
        BUG_ON(!vm);
        b = container_of(vm, struct tegra_iovmm_block, vm_area);
        iovmm_block_put(b);
}

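/*
 * Look up the block containing the I/O virtual address 'addr' in the
 * client's domain and return its area with an extra reference held, or
 * NULL if the address does not fall inside any allocated block.
 */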
struct tegra_iovmm_area *tegra_iovmm_find_area_get(
        struct tegra_iovmm_client *client, tegra_iovmm_addr_t addr)
{
        struct rb_node *n;
        struct tegra_iovmm_block *b = NULL;

        if (!client) return NULL;

        spin_lock(&client->domain->block_lock);
        n = client->domain->all_blocks.rb_node;

        while (n) {
                b = rb_entry(n, struct tegra_iovmm_block, all_node);
                if ((iovmm_start(b) <= addr) && (iovmm_end(b) >= addr)) {
                        if (test_bit(BK_free, &b->flags)) b = NULL;
                        break;
                }
                if (addr > iovmm_start(b))
                        n = n->rb_right;
                else
                        n = n->rb_left;
                b = NULL;
        }
        if (b) atomic_inc(&b->ref);
        spin_unlock(&client->domain->block_lock);
        if (!b) return NULL;
        return &b->vm_area;
}

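/*
 * Lock the client's domain.  The first lock of a domain invokes the
 * device's lock_domain() hook (which may refuse, in which case -EAGAIN is
 * returned) and then maps any areas whose mappings were deferred while the
 * domain was unlocked.
 */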
static int _iovmm_client_lock(struct tegra_iovmm_client *client)
{
        struct tegra_iovmm_device *dev;
        struct tegra_iovmm_domain *domain;
        int v;

        if (unlikely(!client)) return -ENODEV;
        if (unlikely(test_bit(CL_locked, &client->flags))) {
                pr_err("attempting to relock client %s\n", client->name);
                return 0;
        }

        domain = client->domain;
        dev = domain->dev;
        down_write(&domain->map_lock);
        v = atomic_inc_return(&domain->locks);
        /* if the device doesn't export the lock_domain function, the device
         * must guarantee that any valid domain will be locked. */
        if (v==1 && dev->ops->lock_domain) {
                if (dev->ops->lock_domain(domain, client)) {
                        atomic_dec(&domain->locks);
                        up_write(&domain->map_lock);
                        return -EAGAIN;
                }
        }
        if (test_and_clear_bit(DM_map_dirty, &domain->flags)) {
                struct rb_node *n;
                struct tegra_iovmm_block *b;

                spin_lock(&domain->block_lock);
                n = rb_first(&domain->all_blocks);
                while (n) {
                        b = rb_entry(n, struct tegra_iovmm_block, all_node);
                        n = rb_next(n);
                        if (test_bit(BK_free, &b->flags))
                                continue;

                        if (test_and_clear_bit(BK_map_dirty, &b->flags)) {
                                if (!b->vm_area.ops) {
                                        pr_err("%s: vm_area ops must exist for lazy maps\n", __func__);
                                        continue;
                                }
                                dev->ops->map(domain, &b->vm_area);
                        }
                }
        }
        set_bit(CL_locked, &client->flags);
        up_write(&domain->map_lock);
        return 0;
}

int tegra_iovmm_client_trylock(struct tegra_iovmm_client *client)
{
        return _iovmm_client_lock(client);
}

int tegra_iovmm_client_lock(struct tegra_iovmm_client *client)
{
        int ret;

        if (!client) return -ENODEV;

        ret = wait_event_interruptible(client->domain->delay_lock,
                _iovmm_client_lock(client)!=-EAGAIN);

        if (ret==-ERESTARTSYS) return -EINTR;

        return ret;
}

void tegra_iovmm_client_unlock(struct tegra_iovmm_client *client)
{
        struct tegra_iovmm_device *dev;
        struct tegra_iovmm_domain *domain;
        int do_wake = 0;

        if (!client) return;

        if (!test_and_clear_bit(CL_locked, &client->flags)) {
                pr_err("unlocking unlocked client %s\n", client->name);
                return;
        }

        domain = client->domain;
        dev = domain->dev;
        down_write(&domain->map_lock);
        if (!atomic_dec_return(&domain->locks)) {
                if (dev->ops->unlock_domain)
                        dev->ops->unlock_domain(domain, client);
                do_wake = 1;
        }
        up_write(&domain->map_lock);
        if (do_wake) wake_up(&domain->delay_lock);
}

size_t tegra_iovmm_get_vm_size(struct tegra_iovmm_client *client)
{
        struct tegra_iovmm_domain *domain;
        struct rb_node *n;
        struct tegra_iovmm_block *b;
        size_t size = 0;

        if (!client) return 0;

        domain = client->domain;

        spin_lock(&domain->block_lock);
        n = rb_first(&domain->all_blocks);
        while (n) {
                b = rb_entry(n, struct tegra_iovmm_block, all_node);
                n = rb_next(n);
                size += iovmm_length(b);
        }
        spin_unlock(&domain->block_lock);

        return size;
}

void tegra_iovmm_free_client(struct tegra_iovmm_client *client)
{
        struct tegra_iovmm_device *dev;
        struct tegra_iovmm_domain *domain;
        if (!client) return;

        BUG_ON(!client->domain || !client->domain->dev);

        domain = client->domain;
        dev = domain->dev;

        if (test_and_clear_bit(CL_locked, &client->flags)) {
                pr_err("freeing locked client %s\n", client->name);
                if (!atomic_dec_return(&domain->locks)) {
                        down_write(&domain->map_lock);
                        if (dev->ops->unlock_domain)
                                dev->ops->unlock_domain(domain, client);
                        up_write(&domain->map_lock);
                        wake_up(&domain->delay_lock);
                }
        }
        mutex_lock(&iovmm_group_list_lock);
        if (!atomic_dec_return(&domain->clients))
                if (dev->ops->free_domain)
                        dev->ops->free_domain(domain, client);
        list_del(&client->list);
        if (list_empty(&client->group->client_list)) {
                list_del(&client->group->group_list);
                if (client->group->name) kfree(client->group->name);
                kfree(client->group);
        }
        kfree(client->name);
        kfree(client);
        mutex_unlock(&iovmm_group_list_lock);
}

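/*
 * Create a new client.  If share_group names an existing group the client
 * joins it (and shares its domain); otherwise a new group is created and a
 * domain is allocated for it from the first registered device that can
 * provide one.
 */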
struct tegra_iovmm_client *tegra_iovmm_alloc_client(const char *name,
        const char *share_group, struct miscdevice *misc_dev)
{
        struct tegra_iovmm_client *c = kzalloc(sizeof(*c), GFP_KERNEL);
        struct iovmm_share_group *grp = NULL;
        struct iovmm_share_group *tmp;
        struct tegra_iovmm_device *dev;
        unsigned long flags;

        if (!c) return NULL;
        c->name = kstrdup(name, GFP_KERNEL);
        if (!c->name) goto fail;
        c->misc_dev = misc_dev;

        mutex_lock(&iovmm_group_list_lock);
        if (share_group) {
                list_for_each_entry(tmp, &iovmm_groups, group_list) {
                        if (tmp->name && !strcmp(tmp->name, share_group)) {
                                grp = tmp;
                                break;
                        }
                }
        }
        if (!grp) {
                grp = kzalloc(sizeof(*grp), GFP_KERNEL);
                if (!grp) goto fail_lock;
                grp->name = (share_group) ? kstrdup(share_group, GFP_KERNEL) : NULL;
                if (share_group && !grp->name) {
                        kfree(grp);
                        goto fail_lock;
                }
                spin_lock_irqsave(&iovmm_device_list_lock, flags);
                list_for_each_entry(dev, &iovmm_devices, list) {
                        grp->domain = dev->ops->alloc_domain(dev, c);
                        if (grp->domain) break;
                }
                spin_unlock_irqrestore(&iovmm_device_list_lock, flags);
                if (!grp->domain) {
                        pr_err("%s: alloc_domain failed for %s\n",
                                __func__, c->name);
                        dump_stack();
                        if (grp->name) kfree(grp->name);
                        kfree(grp);
                        grp = NULL;
                        goto fail_lock;
                }
                spin_lock_init(&grp->lock);
                INIT_LIST_HEAD(&grp->client_list);
                list_add_tail(&grp->group_list, &iovmm_groups);
        }

        atomic_inc(&grp->domain->clients);
        c->group = grp;
        c->domain = grp->domain;
        spin_lock(&grp->lock);
        list_add_tail(&c->list, &grp->client_list);
        spin_unlock(&grp->lock);
        mutex_unlock(&iovmm_group_list_lock);
        return c;

fail_lock:
        mutex_unlock(&iovmm_group_list_lock);
fail:
        if (c) {
                if (c->name) kfree(c->name);
                kfree(c);
        }
        return NULL;
}

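/*
 * Register an IOVMM-capable device.  The first registration also creates
 * the block kmem cache and the /proc/iovmminfo entry.
 */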
int tegra_iovmm_register(struct tegra_iovmm_device *dev)
{
        BUG_ON(!dev);
        mutex_lock(&iovmm_group_list_lock);
        if (list_empty(&iovmm_devices)) {
                iovmm_cache = KMEM_CACHE(tegra_iovmm_block, 0);
                if (!iovmm_cache) {
                        pr_err("%s: failed to make kmem cache\n", __func__);
                        mutex_unlock(&iovmm_group_list_lock);
                        return -ENOMEM;
                }
                create_proc_read_entry("iovmminfo", S_IRUGO, NULL,
                        tegra_iovmm_read_proc, NULL);
        }
        list_add_tail(&dev->list, &iovmm_devices);
        mutex_unlock(&iovmm_group_list_lock);
        printk(KERN_INFO "%s: added %s\n", __func__, dev->name);
        return 0;
}

int tegra_iovmm_unregister(struct tegra_iovmm_device *dev)
{
        mutex_lock(&iovmm_group_list_lock);
        list_del(&dev->list);
        mutex_unlock(&iovmm_group_list_lock);
        return 0;
}

static int tegra_iovmm_suspend(void)
{
        int rc = 0;
        struct tegra_iovmm_device *dev;
        unsigned long flags;

        spin_lock_irqsave(&iovmm_device_list_lock, flags);
        list_for_each_entry(dev, &iovmm_devices, list) {
                if (!dev->ops->suspend)
                        continue;

                rc = dev->ops->suspend(dev);
                if (rc) {
                        pr_err("%s: %s suspend returned %d\n",
                               __func__, dev->name, rc);
                        spin_unlock_irqrestore(&iovmm_device_list_lock, flags);
                        return rc;
                }
        }
        spin_unlock_irqrestore(&iovmm_device_list_lock, flags);
        return 0;
}

static void tegra_iovmm_resume(void)
{
        struct tegra_iovmm_device *dev;
        unsigned long flags;

        spin_lock_irqsave(&iovmm_device_list_lock, flags);

        list_for_each_entry(dev, &iovmm_devices, list) {
                if (dev->ops->resume)
                        dev->ops->resume(dev);
        }

        spin_unlock_irqrestore(&iovmm_device_list_lock, flags);
}

static struct syscore_ops tegra_iovmm_syscore_ops = {
        .suspend = tegra_iovmm_suspend,
        .resume = tegra_iovmm_resume,
};

static __init int tegra_iovmm_syscore_init(void)
{
        register_syscore_ops(&tegra_iovmm_syscore_ops);
        return 0;
}
subsys_initcall(tegra_iovmm_syscore_init);