/*
 * arch/arm/mach-tegra/iovmm.c
 *
 * Tegra I/O VM manager
 *
 * Copyright (c) 2010, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>

#include <mach/iovmm.h>

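/*
 * Overview: each domain tracks its address space as tegra_iovmm_blocks
 * kept in two red-black trees: domain->all_blocks, ordered by start
 * address, and domain->free_blocks, ordered by block length so that
 * iovmm_alloc_block() can do a best-fit search.
 *
 * Illustrative client-side sketch (not taken from an in-tree caller;
 * "some_pfn" is a made-up placeholder for whatever physical page the
 * caller wants mapped):
 *
 *	struct tegra_iovmm_client *c;
 *	struct tegra_iovmm_area *vm;
 *
 *	c = tegra_iovmm_alloc_client("example", NULL);
 *	vm = tegra_iovmm_create_vm(c, NULL, SZ_1M, pgprot_kernel);
 *	tegra_iovmm_vm_insert_pfn(vm, vm->iovm_start, some_pfn);
 *	...
 *	tegra_iovmm_free_vm(vm);
 *	tegra_iovmm_free_client(c);
 */
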
/* after the best-fit block is located, the remaining pages not needed for
 * the allocation will be split into a new free block if the number of
 * remaining pages is >= MIN_SPLIT_PAGE.
 */
#define MIN_SPLIT_PAGE (4)
#define MIN_SPLIT_BYTES(_d) (MIN_SPLIT_PAGE<<(_d)->dev->pgsize_bits)

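/*
 * Worked example (assuming a device with 4KiB pages, i.e. pgsize_bits == 12):
 * MIN_SPLIT_BYTES() is 16KiB, so a 64KiB best-fit block serving a 40KiB
 * request is split into a 40KiB allocation plus a 24KiB free remainder,
 * while a 48KiB block is handed out whole since only 8KiB would remain.
 */
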
#define iovmm_start(_b) ((_b)->vm_area.iovm_start)
#define iovmm_length(_b) ((_b)->vm_area.iovm_length)
#define iovmm_end(_b) (iovmm_start(_b) + iovmm_length(_b))

/* flags for the block */
#define BK_free		0 /* indicates free mappings */
#define BK_map_dirty	1 /* used by demand-loaded mappings */

/* flags for the client */
#define CL_locked	0

/* flags for the domain */
#define DM_map_dirty	0

struct tegra_iovmm_block {
	struct tegra_iovmm_area vm_area;
	atomic_t		ref;
	unsigned long		flags;
	unsigned long		poison;
	struct rb_node		free_node;
	struct rb_node		all_node;
};

struct iovmm_share_group {
	const char			*name;
	struct tegra_iovmm_domain	*domain;
	struct list_head		client_list;
	struct list_head		group_list;
	spinlock_t			lock;
};

static LIST_HEAD(iovmm_devices);
static LIST_HEAD(iovmm_groups);
static DEFINE_MUTEX(iovmm_group_list_lock);
static DEFINE_SPINLOCK(iovmm_device_list_lock);
static struct kmem_cache *iovmm_cache;

static tegra_iovmm_addr_t iovmm_align_up(struct tegra_iovmm_device *dev,
	tegra_iovmm_addr_t addr)
{
	addr += (1<<dev->pgsize_bits);
	addr--;
	addr &= ~((1<<dev->pgsize_bits)-1);
	return addr;
}

static tegra_iovmm_addr_t iovmm_align_down(struct tegra_iovmm_device *dev,
	tegra_iovmm_addr_t addr)
{
	addr &= ~((1<<dev->pgsize_bits)-1);
	return addr;
}

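/*
 * Example (assuming pgsize_bits == 12, i.e. 4KiB pages):
 * iovmm_align_up() rounds 0x1001 up to 0x2000 and leaves 0x2000 unchanged;
 * iovmm_align_down() rounds 0x2fff down to 0x2000.
 */
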
#define iovmprint(fmt, arg...) snprintf(page+len, count-len, fmt, ## arg)

static void tegra_iovmm_block_stats(struct tegra_iovmm_domain *domain,
	unsigned int *num_blocks, unsigned int *num_free,
	tegra_iovmm_addr_t *total, tegra_iovmm_addr_t *total_free,
	tegra_iovmm_addr_t *max_free)
{
	struct rb_node *n;
	struct tegra_iovmm_block *b;

	*num_blocks = 0;
	*num_free = 0;
	*total = (tegra_iovmm_addr_t)0;
	*total_free = (tegra_iovmm_addr_t)0;
	*max_free = (tegra_iovmm_addr_t)0;

	spin_lock(&domain->block_lock);
	n = rb_first(&domain->all_blocks);
	while (n) {
		b = rb_entry(n, struct tegra_iovmm_block, all_node);
		n = rb_next(n);
		(*num_blocks)++;
		(*total) += iovmm_length(b);
		if (test_bit(BK_free, &b->flags)) {
			(*num_free)++;
			(*total_free) += iovmm_length(b);
			(*max_free) = max_t(tegra_iovmm_addr_t,
				(*max_free), iovmm_length(b));
		}
	}
	spin_unlock(&domain->block_lock);
}

static int tegra_iovmm_read_proc(char *page, char **start, off_t off,
	int count, int *eof, void *data)
{
	struct iovmm_share_group *grp;
	tegra_iovmm_addr_t max_free, total_free, total;
	unsigned int num, num_free;

	int len = 0;

	mutex_lock(&iovmm_group_list_lock);
	len += iovmprint("\ngroups\n");
	if (list_empty(&iovmm_groups))
		len += iovmprint("\t<empty>\n");
	else {
		list_for_each_entry(grp, &iovmm_groups, group_list) {
			len += iovmprint("\t%s (device: %s)\n",
				(grp->name) ? grp->name : "<unnamed>",
				grp->domain->dev->name);
			tegra_iovmm_block_stats(grp->domain, &num,
				&num_free, &total, &total_free, &max_free);
			total >>= 10;
			total_free >>= 10;
			max_free >>= 10;
			len += iovmprint("\t\tsize: %uKiB free: %uKiB "
				"largest: %uKiB (%u free / %u total blocks)\n",
				total, total_free, max_free, num_free, num);
		}
	}
	mutex_unlock(&iovmm_group_list_lock);

	*eof = 1;
	return len;
}

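/*
 * Drop a reference on a block; the final put poisons the block and
 * returns it to the iovmm_cache slab.
 */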
static void iovmm_block_put(struct tegra_iovmm_block *b)
{
	BUG_ON(b->poison);
	BUG_ON(atomic_read(&b->ref)==0);
	if (!atomic_dec_return(&b->ref)) {
		b->poison = 0xa5a5a5a5;
		kmem_cache_free(iovmm_cache, b);
	}
}

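/*
 * Return a block to the free tree, coalescing it with its address-order
 * predecessor and/or successor when they are also free, then re-insert
 * the (possibly merged) block into the length-ordered free tree.
 */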
static void iovmm_free_block(struct tegra_iovmm_domain *domain,
	struct tegra_iovmm_block *block)
{
	struct tegra_iovmm_block *pred = NULL; /* address-order predecessor */
	struct tegra_iovmm_block *succ = NULL; /* address-order successor */
	struct rb_node **p;
	struct rb_node *parent = NULL, *temp;
	int pred_free = 0, succ_free = 0;

	iovmm_block_put(block);

	spin_lock(&domain->block_lock);
	temp = rb_prev(&block->all_node);
	if (temp)
		pred = rb_entry(temp, struct tegra_iovmm_block, all_node);
	temp = rb_next(&block->all_node);
	if (temp)
		succ = rb_entry(temp, struct tegra_iovmm_block, all_node);

	if (pred) pred_free = test_bit(BK_free, &pred->flags);
	if (succ) succ_free = test_bit(BK_free, &succ->flags);

	if (pred_free && succ_free) {
		iovmm_length(pred) += iovmm_length(block);
		iovmm_length(pred) += iovmm_length(succ);
		rb_erase(&block->all_node, &domain->all_blocks);
		rb_erase(&succ->all_node, &domain->all_blocks);
		rb_erase(&succ->free_node, &domain->free_blocks);
		rb_erase(&pred->free_node, &domain->free_blocks);
		iovmm_block_put(block);
		iovmm_block_put(succ);
		block = pred;
	} else if (pred_free) {
		iovmm_length(pred) += iovmm_length(block);
		rb_erase(&block->all_node, &domain->all_blocks);
		rb_erase(&pred->free_node, &domain->free_blocks);
		iovmm_block_put(block);
		block = pred;
	} else if (succ_free) {
		iovmm_length(block) += iovmm_length(succ);
		rb_erase(&succ->all_node, &domain->all_blocks);
		rb_erase(&succ->free_node, &domain->free_blocks);
		iovmm_block_put(succ);
	}

	p = &domain->free_blocks.rb_node;
	while (*p) {
		struct tegra_iovmm_block *b;
		parent = *p;
		b = rb_entry(parent, struct tegra_iovmm_block, free_node);
		if (iovmm_length(block) >= iovmm_length(b))
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&block->free_node, parent, p);
	rb_insert_color(&block->free_node, &domain->free_blocks);
	set_bit(BK_free, &block->flags);
	spin_unlock(&domain->block_lock);
}

/* if the best-fit block is larger than the requested size, a remainder
 * block is created and inserted into the free tree in its place. every
 * block lives in the address-ordered all_blocks tree, and free blocks
 * additionally live in the length-ordered free_blocks tree, so the new
 * remainder must be linked into both.
 *
 * note: this function returns with domain->block_lock held; the caller
 * (iovmm_alloc_block) releases it after clearing its splitting flag. */
static void iovmm_split_free_block(struct tegra_iovmm_domain *domain,
	struct tegra_iovmm_block *block, unsigned long size)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct tegra_iovmm_block *rem;
	struct tegra_iovmm_block *b;

	rem = kmem_cache_zalloc(iovmm_cache, GFP_KERNEL);
	/* take the lock even when the allocation fails, since the caller
	 * unconditionally unlocks after this function returns */
	spin_lock(&domain->block_lock);
	if (!rem)
		return;

	p = &domain->free_blocks.rb_node;

	iovmm_start(rem) = iovmm_start(block) + size;
	iovmm_length(rem) = iovmm_length(block) - size;
	atomic_set(&rem->ref, 1);
	iovmm_length(block) = size;

	while (*p) {
		parent = *p;
		b = rb_entry(parent, struct tegra_iovmm_block, free_node);
		if (iovmm_length(rem) >= iovmm_length(b))
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	set_bit(BK_free, &rem->flags);
	rb_link_node(&rem->free_node, parent, p);
	rb_insert_color(&rem->free_node, &domain->free_blocks);

	p = &domain->all_blocks.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		b = rb_entry(parent, struct tegra_iovmm_block, all_node);
		if (iovmm_start(rem) >= iovmm_start(b))
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&rem->all_node, parent, p);
	rb_insert_color(&rem->all_node, &domain->all_blocks);
}

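/*
 * Best-fit allocator: walk the length-ordered free tree for the smallest
 * free block that satisfies the (page-aligned) request, and split off any
 * large remainder via iovmm_split_free_block(). The file-scope `splitting`
 * flag serializes allocations against an in-progress split, since
 * domain->block_lock is dropped around the GFP_KERNEL allocation of the
 * remainder block.
 */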
static struct tegra_iovmm_block *iovmm_alloc_block(
	struct tegra_iovmm_domain *domain, unsigned long size)
{
	struct rb_node *n;
	struct tegra_iovmm_block *b, *best;
	static int splitting = 0;

	BUG_ON(!size);
	size = iovmm_align_up(domain->dev, size);
	for (;;) {
		spin_lock(&domain->block_lock);
		if (!splitting)
			break;
		spin_unlock(&domain->block_lock);
		schedule();
	}
	n = domain->free_blocks.rb_node;
	best = NULL;
	while (n) {
		b = rb_entry(n, struct tegra_iovmm_block, free_node);
		if (iovmm_length(b) < size) n = n->rb_right;
		else if (iovmm_length(b) == size) {
			best = b;
			break;
		} else {
			best = b;
			n = n->rb_left;
		}
	}
	if (!best) {
		spin_unlock(&domain->block_lock);
		return NULL;
	}
	rb_erase(&best->free_node, &domain->free_blocks);
	clear_bit(BK_free, &best->flags);
	atomic_inc(&best->ref);
	if (iovmm_length(best) >= size+MIN_SPLIT_BYTES(domain)) {
		splitting = 1;
		spin_unlock(&domain->block_lock);
		iovmm_split_free_block(domain, best, size);
		splitting = 0;
	}

	spin_unlock(&domain->block_lock);

	return best;
}

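/*
 * Initialize a domain covering [start, end): the page-aligned range is
 * seeded as a single free block in both red-black trees.
 */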
int tegra_iovmm_domain_init(struct tegra_iovmm_domain *domain,
	struct tegra_iovmm_device *dev, tegra_iovmm_addr_t start,
	tegra_iovmm_addr_t end)
{
	struct tegra_iovmm_block *b;

	b = kmem_cache_zalloc(iovmm_cache, GFP_KERNEL);
	if (!b) return -ENOMEM;

	domain->dev = dev;
	atomic_set(&domain->clients, 0);
	atomic_set(&domain->locks, 0);
	atomic_set(&b->ref, 1);
	spin_lock_init(&domain->block_lock);
	init_rwsem(&domain->map_lock);
	init_waitqueue_head(&domain->delay_lock);
	iovmm_start(b) = iovmm_align_up(dev, start);
	iovmm_length(b) = iovmm_align_down(dev, end) - iovmm_start(b);
	set_bit(BK_free, &b->flags);
	rb_link_node(&b->free_node, NULL, &domain->free_blocks.rb_node);
	rb_insert_color(&b->free_node, &domain->free_blocks);
	rb_link_node(&b->all_node, NULL, &domain->all_blocks.rb_node);
	rb_insert_color(&b->all_node, &domain->all_blocks);
	return 0;
}

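/*
 * Allocate an I/O VM area for a client. When ops is provided the mapping
 * is demand-loaded: if the client is not currently locked, the area is
 * only marked map-dirty and the actual dev->ops->map() call is deferred
 * until the client locks its domain.
 */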
struct tegra_iovmm_area *tegra_iovmm_create_vm(
	struct tegra_iovmm_client *client, struct tegra_iovmm_area_ops *ops,
	unsigned long size, pgprot_t pgprot)
{
	struct tegra_iovmm_block *b;
	struct tegra_iovmm_device *dev;

	if (!client) return NULL;

	dev = client->domain->dev;

	b = iovmm_alloc_block(client->domain, size);
	if (!b) return NULL;

	b->vm_area.domain = client->domain;
	b->vm_area.pgprot = pgprot;
	b->vm_area.ops = ops;

	down_read(&b->vm_area.domain->map_lock);
	if (ops && !test_bit(CL_locked, &client->flags)) {
		set_bit(BK_map_dirty, &b->flags);
		set_bit(DM_map_dirty, &client->domain->flags);
	} else if (ops) {
		if (dev->ops->map(dev, &b->vm_area))
			pr_err("%s failed to map locked domain\n", __func__);
	}
	up_read(&b->vm_area.domain->map_lock);

	return &b->vm_area;
}

void tegra_iovmm_vm_insert_pfn(struct tegra_iovmm_area *area,
	tegra_iovmm_addr_t vaddr, unsigned long pfn)
{
	struct tegra_iovmm_device *dev = area->domain->dev;
	BUG_ON(vaddr & ((1<<dev->pgsize_bits)-1));
	BUG_ON(vaddr >= area->iovm_start + area->iovm_length);
	BUG_ON(vaddr < area->iovm_start);
	BUG_ON(area->ops);

	dev->ops->map_pfn(dev, area, vaddr, pfn);
}

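/*
 * Tear down the translations for an area without releasing its I/O VM
 * range; a later tegra_iovmm_unzap_vm() can re-establish (or re-defer)
 * the mapping.
 */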
void tegra_iovmm_zap_vm(struct tegra_iovmm_area *vm)
{
	struct tegra_iovmm_block *b;
	struct tegra_iovmm_device *dev;

	b = container_of(vm, struct tegra_iovmm_block, vm_area);
	dev = vm->domain->dev;
	/* if the vm area mapping was deferred, don't unmap it since
	 * the memory for the page tables it uses may not be allocated */
	down_read(&vm->domain->map_lock);
	if (!test_and_clear_bit(BK_map_dirty, &b->flags))
		dev->ops->unmap(dev, vm, false);
	up_read(&vm->domain->map_lock);
}

void tegra_iovmm_unzap_vm(struct tegra_iovmm_area *vm)
{
	struct tegra_iovmm_block *b;
	struct tegra_iovmm_device *dev;

	b = container_of(vm, struct tegra_iovmm_block, vm_area);
	dev = vm->domain->dev;
	if (!vm->ops) return;

	down_read(&vm->domain->map_lock);
	if (vm->ops) {
		if (atomic_read(&vm->domain->locks))
			dev->ops->map(dev, vm);
		else {
			set_bit(BK_map_dirty, &b->flags);
			set_bit(DM_map_dirty, &vm->domain->flags);
		}
	}
	up_read(&vm->domain->map_lock);
}

void tegra_iovmm_free_vm(struct tegra_iovmm_area *vm)
{
	struct tegra_iovmm_block *b;
	struct tegra_iovmm_device *dev;
	struct tegra_iovmm_domain *domain;

	if (!vm) return;

	b = container_of(vm, struct tegra_iovmm_block, vm_area);
	domain = vm->domain;
	dev = vm->domain->dev;
	down_read(&domain->map_lock);
	if (!test_and_clear_bit(BK_map_dirty, &b->flags))
		dev->ops->unmap(dev, vm, true);
	iovmm_free_block(domain, b);
	up_read(&domain->map_lock);
}

struct tegra_iovmm_area *tegra_iovmm_area_get(struct tegra_iovmm_area *vm)
{
	struct tegra_iovmm_block *b;

	BUG_ON(!vm);
	b = container_of(vm, struct tegra_iovmm_block, vm_area);

	atomic_inc(&b->ref);
	return &b->vm_area;
}

void tegra_iovmm_area_put(struct tegra_iovmm_area *vm)
{
	struct tegra_iovmm_block *b;
	BUG_ON(!vm);
	b = container_of(vm, struct tegra_iovmm_block, vm_area);
	iovmm_block_put(b);
}

struct tegra_iovmm_area *tegra_iovmm_find_area_get(
	struct tegra_iovmm_client *client, tegra_iovmm_addr_t addr)
{
	struct rb_node *n;
	struct tegra_iovmm_block *b = NULL;

	if (!client) return NULL;

	spin_lock(&client->domain->block_lock);
	n = client->domain->all_blocks.rb_node;

	while (n) {
		b = rb_entry(n, struct tegra_iovmm_block, all_node);
		if ((iovmm_start(b) <= addr) && (iovmm_end(b) >= addr)) {
			if (test_bit(BK_free, &b->flags)) b = NULL;
			break;
		}
		if (addr > iovmm_start(b))
			n = n->rb_right;
		else
			n = n->rb_left;
		b = NULL;
	}
	if (b) atomic_inc(&b->ref);
	spin_unlock(&client->domain->block_lock);
	if (!b) return NULL;
	return &b->vm_area;
}

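/*
 * Lock a client's domain. The first lock of a domain calls the device's
 * lock_domain() op (which may fail with -EAGAIN); any deferred
 * ("map dirty") demand-loaded mappings in the domain are then replayed
 * through dev->ops->map().
 */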
static int _iovmm_client_lock(struct tegra_iovmm_client *client)
{
	struct tegra_iovmm_device *dev;
	struct tegra_iovmm_domain *domain;
	int v;

	if (unlikely(!client)) return -ENODEV;
	if (unlikely(test_bit(CL_locked, &client->flags))) {
		pr_err("attempting to relock client %s\n", client->name);
		return 0;
	}

	domain = client->domain;
	dev = domain->dev;
	down_write(&domain->map_lock);
	v = atomic_inc_return(&domain->locks);
	/* if the device doesn't export the lock_domain function, the device
	 * must guarantee that any valid domain will be locked. */
	if (v==1 && dev->ops->lock_domain) {
		if (dev->ops->lock_domain(dev, domain)) {
			atomic_dec(&domain->locks);
			up_write(&domain->map_lock);
			return -EAGAIN;
		}
	}
	if (test_and_clear_bit(DM_map_dirty, &domain->flags)) {
		struct rb_node *n;
		struct tegra_iovmm_block *b;

		spin_lock(&domain->block_lock);
		n = rb_first(&domain->all_blocks);
		while (n) {
			b = rb_entry(n, struct tegra_iovmm_block, all_node);
			n = rb_next(n);
			if (test_bit(BK_free, &b->flags))
				continue;

			if (test_and_clear_bit(BK_map_dirty, &b->flags)) {
				if (!b->vm_area.ops) {
					pr_err("%s: vm_area ops must exist for lazy maps\n", __func__);
					continue;
				}
				dev->ops->map(dev, &b->vm_area);
			}
		}
	}
	set_bit(CL_locked, &client->flags);
	up_write(&domain->map_lock);
	return 0;
}

int tegra_iovmm_client_trylock(struct tegra_iovmm_client *client)
{
	return _iovmm_client_lock(client);
}

int tegra_iovmm_client_lock(struct tegra_iovmm_client *client)
{
	int ret;

	if (!client) return -ENODEV;

	ret = wait_event_interruptible(client->domain->delay_lock,
		_iovmm_client_lock(client)!=-EAGAIN);

	if (ret==-ERESTARTSYS) return -EINTR;

	return ret;
}

void tegra_iovmm_client_unlock(struct tegra_iovmm_client *client)
{
	struct tegra_iovmm_device *dev;
	struct tegra_iovmm_domain *domain;
	int do_wake = 0;

	if (!client) return;

	if (!test_and_clear_bit(CL_locked, &client->flags)) {
		pr_err("unlocking unlocked client %s\n", client->name);
		return;
	}

	domain = client->domain;
	dev = domain->dev;
	down_write(&domain->map_lock);
	if (!atomic_dec_return(&client->domain->locks)) {
		if (dev->ops->unlock_domain)
			dev->ops->unlock_domain(dev, domain);
		do_wake = 1;
	}
	up_write(&domain->map_lock);
	if (do_wake) wake_up(&domain->delay_lock);
}

size_t tegra_iovmm_get_vm_size(struct tegra_iovmm_client *client)
{
	struct tegra_iovmm_domain *domain;
	struct rb_node *n;
	struct tegra_iovmm_block *b;
	size_t size = 0;

	if (!client) return 0;

	domain = client->domain;

	spin_lock(&domain->block_lock);
	n = rb_first(&domain->all_blocks);
	while (n) {
		b = rb_entry(n, struct tegra_iovmm_block, all_node);
		n = rb_next(n);
		size += iovmm_length(b);
	}
	spin_unlock(&domain->block_lock);

	return size;
}

void tegra_iovmm_free_client(struct tegra_iovmm_client *client)
{
	struct tegra_iovmm_device *dev;
	if (!client) return;

	BUG_ON(!client->domain || !client->domain->dev);

	dev = client->domain->dev;

	if (test_and_clear_bit(CL_locked, &client->flags)) {
		pr_err("freeing locked client %s\n", client->name);
		if (!atomic_dec_return(&client->domain->locks)) {
			down_write(&client->domain->map_lock);
			if (dev->ops->unlock_domain)
				dev->ops->unlock_domain(dev, client->domain);
			up_write(&client->domain->map_lock);
			wake_up(&client->domain->delay_lock);
		}
	}
	mutex_lock(&iovmm_group_list_lock);
	if (!atomic_dec_return(&client->domain->clients))
		if (dev->ops->free_domain)
			dev->ops->free_domain(dev, client->domain);
	list_del(&client->list);
	if (list_empty(&client->group->client_list)) {
		list_del(&client->group->group_list);
		if (client->group->name) kfree(client->group->name);
		kfree(client->group);
	}
	kfree(client->name);
	kfree(client);
	mutex_unlock(&iovmm_group_list_lock);
}

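/*
 * Create a client. Clients passing the same share_group name share one
 * iovmm_share_group (and therefore one domain); a NULL share_group always
 * gets a new, anonymous group backed by the first registered device whose
 * alloc_domain() succeeds.
 */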
struct tegra_iovmm_client *tegra_iovmm_alloc_client(const char *name,
	const char *share_group)
{
	struct tegra_iovmm_client *c = kzalloc(sizeof(*c), GFP_KERNEL);
	struct iovmm_share_group *grp = NULL;
	struct tegra_iovmm_device *dev;
	unsigned long flags;

	if (!c) return NULL;
	c->name = kstrdup(name, GFP_KERNEL);
	if (!c->name) goto fail;

	mutex_lock(&iovmm_group_list_lock);
	if (share_group) {
		struct iovmm_share_group *g;
		/* only latch grp on an exact match, so a failed search does
		 * not leave grp pointing at the list head */
		list_for_each_entry(g, &iovmm_groups, group_list) {
			if (g->name && !strcmp(g->name, share_group)) {
				grp = g;
				break;
			}
		}
	}
	if (!grp) {
		grp = kzalloc(sizeof(*grp), GFP_KERNEL);
		if (!grp) goto fail_lock;
		grp->name = (share_group) ? kstrdup(share_group, GFP_KERNEL) : NULL;
		if (share_group && !grp->name) {
			kfree(grp);
			goto fail_lock;
		}
		spin_lock_irqsave(&iovmm_device_list_lock, flags);
		list_for_each_entry(dev, &iovmm_devices, list) {
			grp->domain = dev->ops->alloc_domain(dev, c);
			if (grp->domain) break;
		}
		spin_unlock_irqrestore(&iovmm_device_list_lock, flags);
		if (!grp->domain) {
			pr_err("%s: alloc_domain failed for %s\n",
				__func__, c->name);
			dump_stack();
			if (grp->name) kfree(grp->name);
			kfree(grp);
			grp = NULL;
			goto fail_lock;
		}
		spin_lock_init(&grp->lock);
		INIT_LIST_HEAD(&grp->client_list);
		list_add_tail(&grp->group_list, &iovmm_groups);
	}

	atomic_inc(&grp->domain->clients);
	c->group = grp;
	c->domain = grp->domain;
	spin_lock(&grp->lock);
	list_add_tail(&c->list, &grp->client_list);
	spin_unlock(&grp->lock);
	mutex_unlock(&iovmm_group_list_lock);
	return c;

fail_lock:
	mutex_unlock(&iovmm_group_list_lock);
fail:
	if (c) {
		if (c->name) kfree(c->name);
		kfree(c);
	}
	return NULL;
}

int tegra_iovmm_register(struct tegra_iovmm_device *dev)
{
	BUG_ON(!dev);
	mutex_lock(&iovmm_group_list_lock);
	if (list_empty(&iovmm_devices)) {
		iovmm_cache = KMEM_CACHE(tegra_iovmm_block, 0);
		if (!iovmm_cache) {
			pr_err("%s: failed to make kmem cache\n", __func__);
			mutex_unlock(&iovmm_group_list_lock);
			return -ENOMEM;
		}
		create_proc_read_entry("iovmminfo", S_IRUGO, NULL,
			tegra_iovmm_read_proc, NULL);
	}
	list_add_tail(&dev->list, &iovmm_devices);
	mutex_unlock(&iovmm_group_list_lock);
	pr_info("%s: added %s\n", __func__, dev->name);
	return 0;
}

int tegra_iovmm_unregister(struct tegra_iovmm_device *dev)
{
	mutex_lock(&iovmm_group_list_lock);
	list_del(&dev->list);
	mutex_unlock(&iovmm_group_list_lock);
	return 0;
}

static int tegra_iovmm_suspend(void)
{
	int rc = 0;
	struct tegra_iovmm_device *dev;
	unsigned long flags;

	spin_lock_irqsave(&iovmm_device_list_lock, flags);
	list_for_each_entry(dev, &iovmm_devices, list) {

		if (!dev->ops->suspend)
			continue;

		rc = dev->ops->suspend(dev);
		if (rc) {
			pr_err("%s: %s suspend returned %d\n",
			       __func__, dev->name, rc);
			spin_unlock_irqrestore(&iovmm_device_list_lock, flags);
			return rc;
		}
	}
	spin_unlock_irqrestore(&iovmm_device_list_lock, flags);
	return 0;
}

static void tegra_iovmm_resume(void)
{
	struct tegra_iovmm_device *dev;
	unsigned long flags;

	spin_lock_irqsave(&iovmm_device_list_lock, flags);

	list_for_each_entry(dev, &iovmm_devices, list) {
		if (dev->ops->resume)
			dev->ops->resume(dev);
	}

	spin_unlock_irqrestore(&iovmm_device_list_lock, flags);
}

static struct syscore_ops tegra_iovmm_syscore_ops = {
	.suspend = tegra_iovmm_suspend,
	.resume = tegra_iovmm_resume,
};

static __init int tegra_iovmm_syscore_init(void)
{
	register_syscore_ops(&tegra_iovmm_syscore_ops);
	return 0;
}
subsys_initcall(tegra_iovmm_syscore_init);