/*
 * drivers/video/tegra/nvmap/nvmap_heap.c
 *
 * Copyright (c) 2011-2013, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/bug.h>
#include <linux/stat.h>
#include <linux/dma-mapping.h>

#include <linux/nvmap.h>
#include "nvmap_priv.h"
#include "nvmap_heap.h"
#include "nvmap_common.h"

#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
42 * "carveouts" are platform-defined regions of physically contiguous memory
43 * which are not managed by the OS. a platform may specify multiple carveouts,
44 * for either small special-purpose memory regions (like IRAM on Tegra SoCs)
45 * or reserved regions of main system memory.
47 * the carveout allocator returns allocations which are physically contiguous.
48 * to reduce external fragmentation, the allocation algorithm implemented in
49 * this file employs 3 strategies for keeping allocations of similar size
50 * grouped together inside the larger heap: the "small", "normal" and "huge"
51 * strategies. the size thresholds (in bytes) for determining which strategy
52 * to employ should be provided by the platform for each heap. it is possible
53 * for a platform to define a heap where only the "normal" strategy is used.
55 * o "normal" allocations use an address-order first-fit allocator (called
56 * BOTTOM_UP in the code below). each allocation is rounded up to be
57 * an integer multiple of the "small" allocation size.
59 * o "huge" allocations use an address-order last-fit allocator (called
60 * TOP_DOWN in the code below). like "normal" allocations, each allocation
61 * is rounded up to be an integer multiple of the "small" allocation size.
63 * o "small" allocations are treated differently: the heap manager maintains
64 * a pool of "small"-sized blocks internally from which allocations less
65 * than 1/2 of the "small" size are buddy-allocated. if a "small" allocation
66 * is requested and none of the buddy sub-heaps is able to service it,
67 * the heap manager will try to allocate a new buddy-heap.
69 * this allocator is intended to keep "splinters" colocated in the carveout,
70 * and to ensure that the minimum free block size in the carveout (i.e., the
71 * "small" threshold) is still a meaningful size.
#define MAX_BUDDY_NR	128	/* maximum buddies in a buddy allocator */

enum direction {
	TOP_DOWN,
	BOTTOM_UP
};

enum block_type {
	BLOCK_FIRST_FIT,	/* block was allocated directly from the heap */
	BLOCK_BUDDY,		/* block was allocated from a buddy sub-heap */
	BLOCK_EMPTY,
};
struct heap_stat {
	size_t free;		/* total free size */
	size_t free_largest;	/* largest free block */
	size_t free_count;	/* number of free blocks */
	size_t total;		/* total size */
	size_t largest;		/* largest unique block */
	size_t count;		/* total number of blocks */
	/* fast compaction attempt counter */
	unsigned int compaction_count_fast;
	/* full compaction attempt counter */
	unsigned int compaction_count_full;
};
struct buddy_block {
	struct nvmap_heap_block block;
	struct buddy_heap *heap;
};

struct list_block {
	struct nvmap_heap_block block;
	struct list_head all_list;
	unsigned int mem_prot;
	phys_addr_t orig_addr;
	size_t size;
	size_t align;
	struct nvmap_heap *heap;
	struct list_head free_list;
};

struct combo_block {
	union {
		struct list_block lb;
		struct buddy_block bb;
	};
};
struct buddy_bits {
	unsigned int alloc:1;
	unsigned int order:7;	/* log2(MAX_BUDDY_NR); */
};

struct buddy_heap {
	struct list_block *heap_base;
	unsigned int nr_buddies;
	struct list_head buddy_list;
	struct buddy_bits bitmap[MAX_BUDDY_NR];
};

struct nvmap_heap {
	struct list_head all_list;
	struct list_head free_list;
	struct mutex lock;
	struct list_head buddy_list;
	unsigned int min_buddy_shift;
	unsigned int buddy_heap_size;
	unsigned int small_alloc;
	const char *name;
	void *arg;
	struct device dev;
};
static struct kmem_cache *buddy_heap_cache;
static struct kmem_cache *block_cache;
static inline struct nvmap_heap *parent_of(struct buddy_heap *heap)
{
	return heap->heap_base->heap;
}
static inline unsigned int order_of(size_t len, size_t min_shift)
{
	len = 2 * DIV_ROUND_UP(len, (1 << min_shift)) - 1;
	return fls(len) - 1;
}
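
/*
 * e.g. (hypothetical values): with min_shift == 10 (1 KiB minimum buddies),
 * order_of(20 << 10, 10) sees 20 minimum-sized chunks; len becomes
 * 2 * 20 - 1 == 39 and fls(39) - 1 == 5, i.e. the smallest power-of-2 span
 * (32 KiB) covering the request. the "2 * n - 1" trick rounds up to the
 * next power of two without a divide: fls(2n - 1) - 1 == ceil(log2(n)).
 */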
/* returns the free size in bytes of the buddy heap; must be called while
 * holding the parent heap's lock. */
static void buddy_stat(struct buddy_heap *heap, struct heap_stat *stat)
{
	unsigned int index;
	unsigned int shift = parent_of(heap)->min_buddy_shift;

	for (index = 0; index < heap->nr_buddies;
	     index += (1 << heap->bitmap[index].order)) {
		size_t curr = 1 << (heap->bitmap[index].order + shift);

		stat->largest = max(stat->largest, curr);
		stat->total += curr;
		stat->count++;

		if (!heap->bitmap[index].alloc) {
			stat->free += curr;
			stat->free_largest = max(stat->free_largest, curr);
			stat->free_count++;
		}
	}
}
/* returns the free size of the heap (including any free blocks in any
 * buddy-heap suballocators; must be called while holding the parent
 * heap's lock. */
static phys_addr_t heap_stat(struct nvmap_heap *heap, struct heap_stat *stat)
{
	struct buddy_heap *bh;
	struct list_block *l = NULL;
	phys_addr_t base = -1ul;

	memset(stat, 0, sizeof(*stat));
	mutex_lock(&heap->lock);
	list_for_each_entry(l, &heap->all_list, all_list) {
		stat->total += l->size;
		stat->largest = max(l->size, stat->largest);
		stat->count++;
		base = min(base, l->orig_addr);
	}

	list_for_each_entry(bh, &heap->buddy_list, buddy_list) {
		buddy_stat(bh, stat);
		/* the total counts are double-counted for buddy heaps
		 * since the blocks allocated for buddy heaps exist in the
		 * all_list; subtract out the doubly-added stats */
		stat->total -= bh->heap_base->size;
		stat->count--;
	}

	list_for_each_entry(l, &heap->free_list, free_list) {
		stat->free += l->size;
		stat->free_count++;
		stat->free_largest = max(l->size, stat->free_largest);
	}
	mutex_unlock(&heap->lock);

	return base;
}
static ssize_t heap_name_show(struct device *dev,
			      struct device_attribute *attr, char *buf);

static ssize_t heap_stat_show(struct device *dev,
			      struct device_attribute *attr, char *buf);

static struct device_attribute heap_stat_total_max =
	__ATTR(total_max, S_IRUGO, heap_stat_show, NULL);

static struct device_attribute heap_stat_total_count =
	__ATTR(total_count, S_IRUGO, heap_stat_show, NULL);

static struct device_attribute heap_stat_total_size =
	__ATTR(total_size, S_IRUGO, heap_stat_show, NULL);

static struct device_attribute heap_stat_free_max =
	__ATTR(free_max, S_IRUGO, heap_stat_show, NULL);

static struct device_attribute heap_stat_free_count =
	__ATTR(free_count, S_IRUGO, heap_stat_show, NULL);

static struct device_attribute heap_stat_free_size =
	__ATTR(free_size, S_IRUGO, heap_stat_show, NULL);

static struct device_attribute heap_stat_base =
	__ATTR(base, S_IRUGO, heap_stat_show, NULL);

static struct device_attribute heap_attr_name =
	__ATTR(name, S_IRUGO, heap_name_show, NULL);
static struct attribute *heap_stat_attrs[] = {
	&heap_stat_total_max.attr,
	&heap_stat_total_count.attr,
	&heap_stat_total_size.attr,
	&heap_stat_free_max.attr,
	&heap_stat_free_count.attr,
	&heap_stat_free_size.attr,
	&heap_stat_base.attr,
	&heap_attr_name.attr,
	NULL,
};

static struct attribute_group heap_stat_attr_group = {
	.attrs	= heap_stat_attrs,
};
static ssize_t heap_name_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct nvmap_heap *heap = container_of(dev, struct nvmap_heap, dev);
	return sprintf(buf, "%s\n", heap->name);
}
static ssize_t heap_stat_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct nvmap_heap *heap = container_of(dev, struct nvmap_heap, dev);
	struct heap_stat stat;
	phys_addr_t base;

	base = heap_stat(heap, &stat);

	if (attr == &heap_stat_total_max)
		return sprintf(buf, "%zu\n", stat.largest);
	else if (attr == &heap_stat_total_count)
		return sprintf(buf, "%zu\n", stat.count);
	else if (attr == &heap_stat_total_size)
		return sprintf(buf, "%zu\n", stat.total);
	else if (attr == &heap_stat_free_max)
		return sprintf(buf, "%zu\n", stat.free_largest);
	else if (attr == &heap_stat_free_count)
		return sprintf(buf, "%zu\n", stat.free_count);
	else if (attr == &heap_stat_free_size)
		return sprintf(buf, "%zu\n", stat.free);
	else if (attr == &heap_stat_base)
		return sprintf(buf, "%08llx\n", (unsigned long long)base);
	else
		return -EINVAL;
}
static struct nvmap_heap_block *buddy_alloc(struct buddy_heap *heap,
					    size_t size, size_t align,
					    unsigned int mem_prot)
{
	unsigned int index = 0;
	unsigned int min_shift = parent_of(heap)->min_buddy_shift;
	unsigned int order = order_of(size, min_shift);
	unsigned int align_mask;
	unsigned int best = heap->nr_buddies;
	struct buddy_block *b;

	if (heap->heap_base->mem_prot != mem_prot)
		return NULL;

	align = max(align, (size_t)(1 << min_shift));
	align_mask = (align >> min_shift) - 1;

	for (index = 0; index < heap->nr_buddies;
	     index += (1 << heap->bitmap[index].order)) {

		if (heap->bitmap[index].alloc || (index & align_mask) ||
		    (heap->bitmap[index].order < order))
			continue;

		if (best == heap->nr_buddies ||
		    heap->bitmap[index].order < heap->bitmap[best].order)
			best = index;

		if (heap->bitmap[best].order == order)
			break;
	}

	if (best == heap->nr_buddies)
		return NULL;

	b = kmem_cache_zalloc(block_cache, GFP_KERNEL);
	if (!b)
		return NULL;

	while (heap->bitmap[best].order != order) {
		unsigned int buddy;

		heap->bitmap[best].order--;
		buddy = best ^ (1 << heap->bitmap[best].order);
		heap->bitmap[buddy].order = heap->bitmap[best].order;
		heap->bitmap[buddy].alloc = 0;
	}
	heap->bitmap[best].alloc = 1;
	b->block.base = heap->heap_base->block.base + (best << min_shift);
	b->heap = heap;
	b->block.type = BLOCK_BUDDY;
	return &b->block;
}
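
/*
 * illustration of the split loop above (hypothetical state): to satisfy an
 * order-3 request from a fresh 128-buddy heap (bitmap[0].order == 7), the
 * loop halves the free chunk four times, leaving free buddies of orders
 * 6, 5, 4 and 3 at indices 64, 32, 16 and 8, and the order-3 allocation at
 * index 0.
 */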
static struct buddy_heap *do_buddy_free(struct nvmap_heap_block *block)
{
	struct buddy_block *b = container_of(block, struct buddy_block, block);
	struct buddy_heap *h = b->heap;
	unsigned int min_shift = parent_of(h)->min_buddy_shift;
	unsigned int index;

	index = (block->base - h->heap_base->block.base) >> min_shift;
	h->bitmap[index].alloc = 0;

	for (;;) {
		unsigned int buddy = index ^ (1 << h->bitmap[index].order);

		if (buddy >= h->nr_buddies || h->bitmap[buddy].alloc ||
		    h->bitmap[buddy].order != h->bitmap[index].order)
			break;

		h->bitmap[buddy].order++;
		h->bitmap[index].order++;
		index = min(buddy, index);
	}

	kmem_cache_free(block_cache, b);
	if ((1 << h->bitmap[0].order) == h->nr_buddies)
		return h;

	return NULL;
}
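
/*
 * continuing the example above (hypothetical state): freeing the order-3
 * block at index 0 merges with its free buddy at index 8 (forming order 4),
 * then with index 16 (order 5), 32 (order 6) and 64 (order 7). at that point
 * bitmap[0].order covers all 128 buddies, so the heap pointer is returned to
 * tell the caller that the now-empty buddy heap can be torn down.
 */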
/*
 * base_max limits position of allocated chunk in memory.
 * if base_max is 0 then there is no such limitation.
 */
static struct nvmap_heap_block *do_heap_alloc(struct nvmap_heap *heap,
					      size_t len, size_t align,
					      unsigned int mem_prot,
					      phys_addr_t base_max)
{
	struct list_block *b = NULL;
	struct list_block *i = NULL;
	struct list_block *rem = NULL;
	phys_addr_t fix_base;
	size_t fix_size;
	enum direction dir;

	/* since pages are only mappable with one cache attribute,
	 * and most allocations from carveout heaps are DMA coherent
	 * (i.e., non-cacheable), round cacheable allocations up to
	 * a page boundary to ensure that the physical pages will
	 * only be mapped one way. */
	if (mem_prot == NVMAP_HANDLE_CACHEABLE ||
	    mem_prot == NVMAP_HANDLE_INNER_CACHEABLE) {
		align = max_t(size_t, align, PAGE_SIZE);
		len = PAGE_ALIGN(len);
	}

	dir = (len <= heap->small_alloc) ? BOTTOM_UP : TOP_DOWN;

	if (dir == BOTTOM_UP) {
		list_for_each_entry(i, &heap->free_list, free_list) {
			fix_base = ALIGN(i->block.base, align);
			if (!fix_base || fix_base >= i->block.base + i->size)
				continue;

			fix_size = i->size - (fix_base - i->block.base);

			/* needed for compaction. relocated chunk
			 * should never go up */
			if (base_max && fix_base > base_max)
				break;

			if (fix_size >= len) {
				b = i;
				break;
			}
		}
	} else {
		list_for_each_entry_reverse(i, &heap->free_list, free_list) {
			if (i->size >= len) {
				fix_base = i->block.base + i->size - len;
				fix_base &= ~(align - 1);
				if (fix_base >= i->block.base) {
					b = i;
					break;
				}
			}
		}
	}

	if (!b)
		return NULL;

	if (dir == BOTTOM_UP)
		b->block.type = BLOCK_FIRST_FIT;

	/* split free block */
	if (b->block.base != fix_base) {
		/* insert a new free block before allocated */
		rem = kmem_cache_zalloc(block_cache, GFP_KERNEL);
		if (!rem) {
			b->orig_addr = b->block.base;
			b->block.base = fix_base;
			b->size -= (b->block.base - b->orig_addr);
			goto out;
		}

		rem->block.type = BLOCK_EMPTY;
		rem->block.base = b->block.base;
		rem->orig_addr = rem->block.base;
		rem->size = fix_base - rem->block.base;
		b->block.base = fix_base;
		b->orig_addr = fix_base;
		b->size -= rem->size;
		list_add_tail(&rem->all_list, &b->all_list);
		list_add_tail(&rem->free_list, &b->free_list);
	} else {
		b->orig_addr = b->block.base;
	}

	if (b->size > len) {
		/* insert a new free block after allocated */
		rem = kmem_cache_zalloc(block_cache, GFP_KERNEL);
		if (!rem)
			goto out;

		rem->block.type = BLOCK_EMPTY;
		rem->block.base = b->block.base + len;
		rem->size = b->size - len;
		BUG_ON(rem->size > b->size);
		rem->orig_addr = rem->block.base;
		b->size = len;
		list_add(&rem->all_list, &b->all_list);
		list_add(&rem->free_list, &b->free_list);
	}

out:
	list_del(&b->free_list);
	b->heap = heap;
	b->mem_prot = mem_prot;
	return &b->block;
}
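
/*
 * split example (hypothetical numbers): a BOTTOM_UP request of len 0x2000
 * with align 0x4000 against a free block [0x1000..0x9000) yields
 * fix_base == 0x4000. the block is split three ways: a leading free
 * remainder [0x1000..0x4000), the allocation [0x4000..0x6000), and a
 * trailing free remainder [0x6000..0x9000); both leftovers stay on the
 * free list for later coalescing in do_heap_free().
 */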
#ifdef DEBUG_FREE_LIST
static void freelist_debug(struct nvmap_heap *heap, const char *title,
			   struct list_block *token)
{
	int i = 0;
	struct list_block *n;

	/* dev_dbg, not the non-existent dev_debug: this block is normally
	 * compiled out, so the typo went unnoticed */
	dev_dbg(&heap->dev, "%s\n", title);

	list_for_each_entry(n, &heap->free_list, free_list) {
		dev_dbg(&heap->dev, "\t%d [%p..%p]%s\n", i, (void *)n->orig_addr,
			(void *)(n->orig_addr + n->size),
			(n == token) ? "<--" : "");
		i++;
	}
}
#else
#define freelist_debug(_heap, _title, _token) do { } while (0)
#endif
static struct list_block *do_heap_free(struct nvmap_heap_block *block)
{
	struct list_block *b = container_of(block, struct list_block, block);
	struct list_block *n = NULL;
	struct nvmap_heap *heap = b->heap;

	/* restore the pre-alignment base and size; orig_addr is never above
	 * block.base (they differ only when the front-split in do_heap_alloc
	 * could not insert a remainder block) */
	BUG_ON(b->block.base < b->orig_addr);
	b->size += (b->block.base - b->orig_addr);
	b->block.base = b->orig_addr;

	freelist_debug(heap, "free list before", b);

	/* Find position of first free block to the right of freed one */
	list_for_each_entry(n, &heap->free_list, free_list) {
		if (n->block.base > b->block.base)
			break;
	}

	/* Add freed block before found free one */
	list_add_tail(&b->free_list, &n->free_list);
	BUG_ON(list_empty(&b->all_list));

	freelist_debug(heap, "free list pre-merge", b);

	/* merge freed block with next if they connect
	 * freed block becomes bigger, next one is destroyed */
	if (!list_is_last(&b->free_list, &heap->free_list)) {
		n = list_first_entry(&b->free_list, struct list_block, free_list);
		if (n->block.base == b->block.base + b->size) {
			list_del(&n->all_list);
			list_del(&n->free_list);
			BUG_ON(b->orig_addr >= n->orig_addr);
			b->size += n->size;
			kmem_cache_free(block_cache, n);
		}
	}

	/* merge freed block with prev if they connect
	 * previous free block becomes bigger, freed one is destroyed */
	if (b->free_list.prev != &heap->free_list) {
		n = list_entry(b->free_list.prev, struct list_block, free_list);
		if (n->block.base + n->size == b->block.base) {
			list_del(&b->all_list);
			list_del(&b->free_list);
			BUG_ON(n->orig_addr >= b->orig_addr);
			n->size += b->size;
			kmem_cache_free(block_cache, b);
			b = n;
		}
	}

	freelist_debug(heap, "free list after", b);
	b->block.type = BLOCK_EMPTY;
	return b;
}
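
/*
 * coalescing example (hypothetical numbers): freeing [0x4000..0x6000) while
 * [0x1000..0x4000) and [0x6000..0x9000) sit on the free list first absorbs
 * the next block (size grows to 0x5000), then is itself absorbed into the
 * previous one, leaving a single free block [0x1000..0x9000) exactly as it
 * was before the allocation in the example above.
 */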
static struct nvmap_heap_block *do_buddy_alloc(struct nvmap_heap *h,
					       size_t len, size_t align,
					       unsigned int mem_prot)
{
	struct buddy_heap *bh;
	struct nvmap_heap_block *b = NULL;

	list_for_each_entry(bh, &h->buddy_list, buddy_list) {
		b = buddy_alloc(bh, len, align, mem_prot);
		if (b)
			return b;
	}

	/* no buddy heaps could service this allocation: try to create a new
	 * buddy heap instead */
	bh = kmem_cache_zalloc(buddy_heap_cache, GFP_KERNEL);
	if (!bh)
		return NULL;

	b = do_heap_alloc(h, h->buddy_heap_size,
			  h->buddy_heap_size, mem_prot, 0);
	if (!b) {
		kmem_cache_free(buddy_heap_cache, bh);
		return NULL;
	}

	bh->heap_base = container_of(b, struct list_block, block);
	bh->nr_buddies = h->buddy_heap_size >> h->min_buddy_shift;
	bh->bitmap[0].alloc = 0;
	bh->bitmap[0].order = order_of(h->buddy_heap_size, h->min_buddy_shift);
	list_add_tail(&bh->buddy_list, &h->buddy_list);
	return buddy_alloc(bh, len, align, mem_prot);
}
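
/*
 * e.g. (hypothetical sizes): with buddy_heap_size == 128 KiB and
 * min_buddy_shift == 10, a fresh buddy heap starts with nr_buddies == 128
 * and bitmap[0].order == order_of(128 KiB, 10) == 7, i.e. one free chunk
 * spanning the whole sub-heap, which the final buddy_alloc() then splits.
 */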
/* nvmap_heap_alloc: allocates a block of memory of len bytes, aligned to
 * align bytes. */
struct nvmap_heap_block *nvmap_heap_alloc(struct nvmap_heap *h,
					  struct nvmap_handle *handle)
{
	struct nvmap_heap_block *b;
	size_t len = handle->size;
	size_t align = handle->align;
	unsigned int prot = handle->flags;

	mutex_lock(&h->lock);

	if (len <= h->buddy_heap_size / 2) {
		b = do_buddy_alloc(h, len, align, prot);
	} else {
		if (h->buddy_heap_size)
			len = ALIGN(len, h->buddy_heap_size);
		align = max(align, (size_t)L1_CACHE_BYTES);
		b = do_heap_alloc(h, len, align, prot, 0);
	}

	if (b)
		handle->carveout = b;

	mutex_unlock(&h->lock);
	return b;
}
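
/*
 * note the dispatch above: a request at or below buddy_heap_size / 2 is
 * served from the buddy pool (the "small" strategy); anything larger is
 * rounded up to a buddy_heap_size multiple (when buddy heaps are enabled),
 * matching the "integer multiple of the small allocation size" rule
 * described at the top of this file.
 */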
struct nvmap_heap *nvmap_block_to_heap(struct nvmap_heap_block *b)
{
	if (b->type == BLOCK_BUDDY) {
		struct buddy_block *bb;
		bb = container_of(b, struct buddy_block, block);
		return parent_of(bb->heap);
	} else {
		struct list_block *lb;
		lb = container_of(b, struct list_block, block);
		return lb->heap;
	}
}
/* nvmap_heap_free: frees block b */
void nvmap_heap_free(struct nvmap_heap_block *b)
{
	struct buddy_heap *bh = NULL;
	struct nvmap_heap *h = nvmap_block_to_heap(b);
	struct list_block *lb;

	mutex_lock(&h->lock);
	if (b->type == BLOCK_BUDDY)
		bh = do_buddy_free(b);
	else {
		lb = container_of(b, struct list_block, block);
		nvmap_flush_heap_block(NULL, b, lb->size, lb->mem_prot);
		do_heap_free(b);
	}

	if (bh) {
		/* the freed block emptied its buddy heap: tear the buddy
		 * heap down and release its backing block to the heap */
		list_del(&bh->buddy_list);
		mutex_unlock(&h->lock);
		nvmap_heap_free(&bh->heap_base->block);
		kmem_cache_free(buddy_heap_cache, bh);
	} else
		mutex_unlock(&h->lock);
}
static void heap_release(struct device *heap)
{
}
/* nvmap_heap_create: create a heap object of len bytes, starting from
 * address base.
 *
 * if buddy_size is >= NVMAP_HEAP_MIN_BUDDY_SIZE, then allocations <= 1/2
 * of the buddy heap size will use a buddy sub-allocator, where each buddy
 * heap is buddy_size bytes (should be a power of 2). all other allocations
 * will be rounded up to be a multiple of buddy_size bytes.
 */
struct nvmap_heap *nvmap_heap_create(struct device *parent, const char *name,
				     phys_addr_t base, size_t len,
				     size_t buddy_size, void *arg)
{
	struct nvmap_heap *h = NULL;
	struct list_block *l = NULL;
	DEFINE_DMA_ATTRS(attrs);

	if (WARN_ON(buddy_size && buddy_size < NVMAP_HEAP_MIN_BUDDY_SIZE)) {
		dev_warn(parent, "%s: buddy_size %zu too small\n", __func__,
			 buddy_size);
		buddy_size = 0;
	} else if (WARN_ON(buddy_size >= len)) {
		dev_warn(parent, "%s: buddy_size %zu too large\n", __func__,
			 buddy_size);
		buddy_size = 0;
	} else if (WARN_ON(buddy_size & (buddy_size - 1))) {
		dev_warn(parent, "%s: buddy_size %zu not a power of 2\n",
			 __func__, buddy_size);
		buddy_size = 1 << (ilog2(buddy_size) + 1);
	}

	if (WARN_ON(buddy_size && (base & (buddy_size - 1)))) {
		phys_addr_t orig = base;
		dev_warn(parent, "%s: base address 0x%llx not aligned to "
			 "buddy_size %zu\n", __func__, (u64)base, buddy_size);
		base = ALIGN(base, buddy_size);
		len -= (base - orig);
	}

	if (WARN_ON(buddy_size && (len & (buddy_size - 1)))) {
		dev_warn(parent, "%s: length %zu not aligned to "
			 "buddy_size %zu\n", __func__, len, buddy_size);
		len &= ~(buddy_size - 1);
	}

	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h) {
		dev_err(parent, "%s: out of memory\n", __func__);
		goto fail_alloc;
	}

	l = kmem_cache_zalloc(block_cache, GFP_KERNEL);
	if (!l) {
		dev_err(parent, "%s: out of memory\n", __func__);
		goto fail_alloc;
	}

	dev_set_name(&h->dev, "heap-%s", name);
	h->name = name;
	h->arg = arg;
	h->dev.parent = parent;
	h->dev.driver = NULL;
	h->dev.release = heap_release;
	if (device_register(&h->dev)) {
		dev_err(parent, "%s: failed to register %s\n", __func__,
			dev_name(&h->dev));
		goto fail_alloc;
	}
	if (sysfs_create_group(&h->dev.kobj, &heap_stat_attr_group)) {
		dev_err(&h->dev, "%s: failed to create attributes\n", __func__);
		goto fail_register;
	}
	h->small_alloc = max(2 * buddy_size, len / 256);
	h->buddy_heap_size = buddy_size;
	if (buddy_size)
		h->min_buddy_shift = ilog2(buddy_size / MAX_BUDDY_NR);
	INIT_LIST_HEAD(&h->free_list);
	INIT_LIST_HEAD(&h->buddy_list);
	INIT_LIST_HEAD(&h->all_list);
	mutex_init(&h->lock);
	l->block.base = base;
	l->block.type = BLOCK_EMPTY;
	l->size = len;
	l->orig_addr = base;
	list_add_tail(&l->free_list, &h->free_list);
	list_add_tail(&l->all_list, &h->all_list);

	inner_flush_cache_all();
	outer_flush_range(base, base + len);
	wmb();

	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
#ifdef CONFIG_PLATFORM_ENABLE_IOMMU
	dma_map_linear_attrs(parent->parent, base, len, DMA_TO_DEVICE, &attrs);
#endif
	return h;

fail_register:
	device_unregister(&h->dev);
fail_alloc:
	if (l)
		kmem_cache_free(block_cache, l);
	kfree(h);
	return NULL;
}
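
/*
 * usage sketch (hypothetical platform code; the device, name, and sizes are
 * illustrative only): carve 64 MiB out of memory at 0x80000000 with 128 KiB
 * buddy heaps, then tear it down when done:
 *
 *	struct nvmap_heap *heap;
 *
 *	heap = nvmap_heap_create(&pdev->dev, "generic-0",
 *				 0x80000000, 64 << 20, 128 << 10, NULL);
 *	if (!heap)
 *		return -ENOMEM;
 *	...
 *	nvmap_heap_destroy(heap);
 */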
void *nvmap_heap_device_to_arg(struct device *dev)
{
	struct nvmap_heap *heap = container_of(dev, struct nvmap_heap, dev);
	return heap->arg;
}

void *nvmap_heap_to_arg(struct nvmap_heap *heap)
{
	return heap->arg;
}
/* nvmap_heap_destroy: frees all resources in heap */
void nvmap_heap_destroy(struct nvmap_heap *heap)
{
	WARN_ON(!list_empty(&heap->buddy_list));

	sysfs_remove_group(&heap->dev.kobj, &heap_stat_attr_group);
	device_unregister(&heap->dev);

	while (!list_empty(&heap->buddy_list)) {
		struct buddy_heap *b;
		b = list_first_entry(&heap->buddy_list, struct buddy_heap,
				     buddy_list);
		/* delete the entry, not the list head, so the loop
		 * actually terminates */
		list_del(&b->buddy_list);
		nvmap_heap_free(&b->heap_base->block);
		kmem_cache_free(buddy_heap_cache, b);
	}

	WARN_ON(!list_is_singular(&heap->all_list));
	while (!list_empty(&heap->all_list)) {
		struct list_block *l;
		l = list_first_entry(&heap->all_list, struct list_block,
				     all_list);
		list_del(&l->all_list);
		kmem_cache_free(block_cache, l);
	}

	kfree(heap);
}
/* nvmap_heap_create_group: adds the attribute_group grp to the heap kobject */
int nvmap_heap_create_group(struct nvmap_heap *heap,
			    const struct attribute_group *grp)
{
	return sysfs_create_group(&heap->dev.kobj, grp);
}

/* nvmap_heap_remove_group: removes the attribute_group grp */
void nvmap_heap_remove_group(struct nvmap_heap *heap,
			     const struct attribute_group *grp)
{
	sysfs_remove_group(&heap->dev.kobj, grp);
}
int nvmap_heap_init(void)
{
	BUG_ON(buddy_heap_cache != NULL);
	buddy_heap_cache = KMEM_CACHE(buddy_heap, 0);
	if (!buddy_heap_cache) {
		pr_err("%s: unable to create buddy heap cache\n", __func__);
		return -ENOMEM;
	}

	block_cache = KMEM_CACHE(combo_block, 0);
	if (!block_cache) {
		kmem_cache_destroy(buddy_heap_cache);
		pr_err("%s: unable to create block cache\n", __func__);
		return -ENOMEM;
	}
	return 0;
}
void nvmap_heap_deinit(void)
{
	if (buddy_heap_cache)
		kmem_cache_destroy(buddy_heap_cache);
	if (block_cache)
		kmem_cache_destroy(block_cache);

	block_cache = NULL;
	buddy_heap_cache = NULL;
}