/*
 * Basic general purpose allocator for managing special purpose
 * memory, for example, memory that is not managed by the regular
 * kmalloc/kfree interface. Uses for this include on-device special
 * memory and uncached memory.
 *
 * It is safe to use the allocator in NMI handlers and other special
 * unblockable contexts that could otherwise deadlock on locks. This
 * is implemented by using atomic operations and retries on any
 * conflicts. The disadvantage is that there may be livelocks in
 * extreme cases. For better scalability, one allocator can be used
 * for each CPU.
 *
 * The lockless operation only works if there is enough memory
 * available. If new memory is added to the pool, a lock still has
 * to be taken. So any user relying on locklessness has to ensure
 * that sufficient memory is preallocated.
 *
 * The basic atomic operation of this allocator is cmpxchg on long.
 * On architectures that don't have an NMI-safe cmpxchg implementation,
 * the allocator can NOT be used in NMI handlers. So code using the
 * allocator in an NMI handler should depend on
 * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
 *
 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/genalloc.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		/* Fail if any requested bit is already owned by someone else. */
		if (val & mask_to_set)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val);

	return 0;
}
static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		/* Fail unless every bit we want to clear is currently set. */
		if ((val & mask_to_clear) != mask_to_clear)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val);

	return 0;
}
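/*
 * Illustrative sketch (not part of the original file): how the lockless
 * primitives above behave on conflict. The word and masks here are made
 * up for demonstration only.
 */
static void __maybe_unused genalloc_bits_ll_demo(void)
{
	unsigned long word = 0;

	/* Succeeds: bits 0-3 are clear, so all of them can be claimed. */
	if (set_bits_ll(&word, 0xfUL) == 0)
		pr_info("bits 0-3 set, word=%#lx\n", word);

	/* Fails with -EBUSY: bit 3 is already set, the word is unchanged. */
	if (set_bits_ll(&word, 0x8UL) == -EBUSY)
		pr_info("conflicting set refused, word=%#lx\n", word);

	/* Clearing exactly the bits we own succeeds. */
	if (clear_bits_ll(&word, 0xfUL) == 0)
		pr_info("bits released, word=%#lx\n", word);
}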
/**
 * bitmap_set_ll - set the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to set
 *
 * Set @nr bits starting at @start in @map lock-lessly. Several users
 * can set/clear the same bitmap simultaneously without a lock. If two
 * users set the same bit, one user will return the number of bits
 * still to be set, otherwise return 0.
 */
static int bitmap_set_ll(unsigned long *map, int start, int nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const int size = start + nr;
	int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);

	while (nr - bits_to_set >= 0) {
		if (set_bits_ll(p, mask_to_set))
			return nr;
		nr -= bits_to_set;
		bits_to_set = BITS_PER_LONG;
		mask_to_set = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_set &= BITMAP_LAST_WORD_MASK(size);
		if (set_bits_ll(p, mask_to_set))
			return nr;
	}

	return 0;
}
/**
 * bitmap_clear_ll - clear the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to clear
 *
 * Clear @nr bits starting at @start in @map lock-lessly. Several users
 * can set/clear the same bitmap simultaneously without a lock. If two
 * users clear the same bit, one user will return the number of bits
 * still to be cleared, otherwise return 0.
 */
static int bitmap_clear_ll(unsigned long *map, int start, int nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const int size = start + nr;
	int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);

	while (nr - bits_to_clear >= 0) {
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
		nr -= bits_to_clear;
		bits_to_clear = BITS_PER_LONG;
		mask_to_clear = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
	}

	return 0;
}
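/*
 * Illustrative sketch (not part of the original file): the caller-side
 * contract of bitmap_set_ll(). A non-zero return means another user won
 * the race for some bit, and the caller must roll back the bits it did
 * manage to set, exactly as gen_pool_alloc_addr() does below.
 */
static void __maybe_unused genalloc_bitmap_ll_demo(void)
{
	static unsigned long map[BITS_TO_LONGS(64)];
	int remain;

	remain = bitmap_set_ll(map, 4, 20);	/* claim bits 4..23 */
	if (remain) {
		/* Lost a race: undo only the (20 - remain) bits we set. */
		bitmap_clear_ll(map, 4, 20 - remain);
	} else {
		/* Got all 20 bits; release them again when done. */
		bitmap_clear_ll(map, 4, 20);
	}
}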
/**
 * gen_pool_create - create a new special memory pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.
 */
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
	struct gen_pool *pool;

	pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
	if (pool != NULL) {
		spin_lock_init(&pool->lock);
		INIT_LIST_HEAD(&pool->chunks);
		pool->min_alloc_order = min_alloc_order;
		pool->algo = gen_pool_first_fit;
		pool->data = NULL;
	}
	return pool;
}
EXPORT_SYMBOL(gen_pool_create);
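/*
 * Illustrative sketch (not part of the original file): creating a pool
 * whose smallest allocation unit is 32 bytes. min_alloc_order trades
 * bitmap footprint against allocation granularity.
 */
static struct gen_pool * __maybe_unused genalloc_create_demo(void)
{
	/* order 5: each bitmap bit represents 2^5 = 32 bytes */
	return gen_pool_create(5, -1);
}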
/**
 * gen_pool_add_virt - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @virt: virtual starting address of memory chunk to add to pool
 * @phys: physical starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 *
 * Add a new chunk of special memory to the specified pool.
 *
 * Returns 0 on success or a -ve errno on failure.
 */
int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
		      size_t size, int nid)
{
	struct gen_pool_chunk *chunk;
	int nbits = size >> pool->min_alloc_order;
	int nbytes = sizeof(struct gen_pool_chunk) +
		     BITS_TO_LONGS(nbits) * sizeof(long);

	chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid);
	if (unlikely(chunk == NULL))
		return -ENOMEM;

	chunk->phys_addr = phys;
	chunk->start_addr = virt;
	chunk->end_addr = virt + size;
	atomic_set(&chunk->avail, size);

	spin_lock(&pool->lock);
	list_add_rcu(&chunk->next_chunk, &pool->chunks);
	spin_unlock(&pool->lock);

	return 0;
}
EXPORT_SYMBOL(gen_pool_add_virt);
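/*
 * Illustrative sketch (not part of the original file): seeding a pool
 * with one chunk of pre-mapped on-device SRAM. The virtual/physical
 * bases and size are hypothetical; the pool only does bookkeeping on
 * these addresses and never dereferences them itself.
 */
static int __maybe_unused genalloc_add_chunk_demo(struct gen_pool *pool)
{
	unsigned long virt = 0xf0000000UL;	/* hypothetical mapping */
	phys_addr_t phys = 0x40000000;		/* hypothetical SRAM base */
	size_t size = 0x10000;			/* 64 KiB */

	/* Takes pool->lock, so not for NMI or other lockless contexts. */
	return gen_pool_add_virt(pool, virt, phys, size, -1);
}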
/**
 * gen_pool_virt_to_phys - return the physical address of memory
 * @pool: pool to allocate from
 * @addr: starting address of memory
 *
 * Returns the physical address on success, or -1 on error.
 */
phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
{
	struct gen_pool_chunk *chunk;
	phys_addr_t paddr = -1;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr < chunk->end_addr) {
			paddr = chunk->phys_addr + (addr - chunk->start_addr);
			break;
		}
	}
	rcu_read_unlock();

	return paddr;
}
EXPORT_SYMBOL(gen_pool_virt_to_phys);
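/*
 * Illustrative sketch (not part of the original file): translating a
 * pool address to its physical counterpart, e.g. for programming a DMA
 * engine. Only works if the chunk was added with a valid @phys, as in
 * the sketch above.
 */
static phys_addr_t __maybe_unused genalloc_virt_to_phys_demo(
		struct gen_pool *pool, unsigned long vaddr)
{
	phys_addr_t paddr = gen_pool_virt_to_phys(pool, vaddr);

	if (paddr == (phys_addr_t)-1)
		pr_warn("%#lx does not belong to this pool\n", vaddr);
	return paddr;
}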
/**
 * gen_pool_destroy - destroy a special memory pool
 * @pool: pool to destroy
 *
 * Destroy the specified special memory pool. Verifies that there are no
 * outstanding allocations.
 */
void gen_pool_destroy(struct gen_pool *pool)
{
	struct list_head *_chunk, *_next_chunk;
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	int bit, end_bit;

	list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
		list_del(&chunk->next_chunk);

		/* Any bit still set means an allocation was never freed. */
		end_bit = (chunk->end_addr - chunk->start_addr) >> order;
		bit = find_next_bit(chunk->bits, end_bit, 0);
		BUG_ON(bit < end_bit);

		kfree(chunk);
	}
	kfree(pool);
}
EXPORT_SYMBOL(gen_pool_destroy);
/**
 * gen_pool_alloc_addr - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @alloc_addr: if non-zero, allocate starting at @alloc_addr
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Can not be used in NMI handler on architectures without
 * NMI-safe cmpxchg implementation.
 *
 * Returns the allocated address, or 0 on failure.
 */
unsigned long gen_pool_alloc_addr(struct gen_pool *pool, size_t size,
				  unsigned long alloc_addr)
{
	struct gen_pool_chunk *chunk;
	unsigned long addr = 0;
	int order = pool->min_alloc_order;
	int nbits, start_bit = 0, end_bit, remain;
	int alloc_bit_needed = 0;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	if (size == 0)
		return 0;

	/* A requested address must be aligned to the allocation granule. */
	if (alloc_addr & ((1UL << order) - 1))
		return 0;

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (size > atomic_read(&chunk->avail))
			continue;

		end_bit = (chunk->end_addr - chunk->start_addr) >> order;
		if (alloc_addr) {
			if (alloc_addr < chunk->start_addr ||
			    alloc_addr >= chunk->end_addr)
				continue;
			if (alloc_addr + size > chunk->end_addr)
				goto done;
			alloc_bit_needed = start_bit =
				(alloc_addr - chunk->start_addr) >> order;
		}
retry:
		start_bit = pool->algo(chunk->bits, end_bit, start_bit, nbits,
				       pool->data);
		if (alloc_addr && alloc_bit_needed != start_bit)
			goto done;
		if (start_bit >= end_bit)
			continue;
		remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
		if (remain) {
			/* Lost a race: roll back what we set and retry. */
			remain = bitmap_clear_ll(chunk->bits, start_bit,
						 nbits - remain);
			BUG_ON(remain);
			goto retry;
		}

		addr = chunk->start_addr + ((unsigned long)start_bit << order);
		size = nbits << order;
		atomic_sub(size, &chunk->avail);
		break;
	}
done:
	rcu_read_unlock();
	return addr;
}
EXPORT_SYMBOL(gen_pool_alloc_addr);
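/*
 * Illustrative sketch (not part of the original file): the two ways to
 * call gen_pool_alloc_addr(). Sizes are rounded up to the pool's
 * min_alloc_order granularity; a return of 0 means failure. The offsets
 * used here are hypothetical.
 */
static void __maybe_unused genalloc_alloc_demo(struct gen_pool *pool)
{
	unsigned long addr, fixed;

	/* Let the pool's algorithm (first-fit by default) pick a spot. */
	addr = gen_pool_alloc_addr(pool, 256, 0);
	if (!addr)
		return;

	/*
	 * Ask for a specific address: must be aligned to the pool's
	 * minimum order and succeeds only if that exact range is free.
	 */
	fixed = gen_pool_alloc_addr(pool, 256, addr + 1024);
	if (fixed)
		gen_pool_free(pool, fixed, 256);

	gen_pool_free(pool, addr, 256);
}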
/**
 * gen_pool_free - free allocated special memory back to the pool
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free
 *
 * Free previously allocated special memory back to the specified
 * pool. Can not be used in NMI handler on architectures without
 * NMI-safe cmpxchg implementation.
 */
void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
{
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	int start_bit, nbits, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr < chunk->end_addr) {
			BUG_ON(addr + size > chunk->end_addr);
			start_bit = (addr - chunk->start_addr) >> order;
			remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
			BUG_ON(remain);
			size = nbits << order;
			atomic_add(size, &chunk->avail);
			rcu_read_unlock();
			return;
		}
	}
	rcu_read_unlock();
	/* Freeing an address that is not from this pool is a bug. */
	BUG();
}
EXPORT_SYMBOL(gen_pool_free);
/**
 * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
 * @pool: the generic memory pool
 * @func: func to call
 * @data: additional data used by @func
 *
 * Call @func for every chunk of generic memory pool. The @func is
 * called with rcu_read_lock held.
 */
void gen_pool_for_each_chunk(struct gen_pool *pool,
	void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
	void *data)
{
	struct gen_pool_chunk *chunk;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk)
		func(pool, chunk, data);
	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_for_each_chunk);
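/*
 * Illustrative sketch (not part of the original file): a callback for
 * gen_pool_for_each_chunk() that totals the managed address range. The
 * callback runs under rcu_read_lock(), so it must not sleep.
 */
static void __maybe_unused genalloc_sum_chunk(struct gen_pool *pool,
		struct gen_pool_chunk *chunk, void *data)
{
	size_t *total = data;

	*total += chunk->end_addr - chunk->start_addr;
}

static size_t __maybe_unused genalloc_for_each_demo(struct gen_pool *pool)
{
	size_t total = 0;

	gen_pool_for_each_chunk(pool, genalloc_sum_chunk, &total);
	return total;
}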
/**
 * gen_pool_avail - get available free space of the pool
 * @pool: pool to get available free space of
 *
 * Return available free space of the specified pool.
 */
size_t gen_pool_avail(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t avail = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		avail += atomic_read(&chunk->avail);
	rcu_read_unlock();
	return avail;
}
EXPORT_SYMBOL_GPL(gen_pool_avail);
/**
 * gen_pool_size - get size in bytes of memory managed by the pool
 * @pool: pool to get size of
 *
 * Return size in bytes of memory managed by the pool.
 */
size_t gen_pool_size(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t size = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		size += chunk->end_addr - chunk->start_addr;
	rcu_read_unlock();
	return size;
}
EXPORT_SYMBOL_GPL(gen_pool_size);
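/*
 * Illustrative sketch (not part of the original file): reporting pool
 * occupancy from the two accessors above. Both values are snapshots and
 * may be stale by the time they are printed.
 */
static void __maybe_unused genalloc_stats_demo(struct gen_pool *pool)
{
	size_t size = gen_pool_size(pool);
	size_t avail = gen_pool_avail(pool);

	pr_info("pool: %zu of %zu bytes free\n", avail, size);
}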
/**
 * gen_pool_set_algo - set the allocation algorithm
 * @pool: pool to change allocation algorithm of
 * @algo: custom algorithm function
 * @data: additional data used by @algo
 *
 * Call @algo for each memory allocation in the pool.
 * If @algo is NULL, use gen_pool_first_fit as the default
 * memory allocation function.
 */
void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data)
{
	rcu_read_lock();

	pool->algo = algo;
	if (!pool->algo)
		pool->algo = gen_pool_first_fit;

	pool->data = data;

	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_set_algo);
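/*
 * Illustrative sketch (not part of the original file): switching a pool
 * from the default first-fit policy to the best-fit policy defined
 * below, to reduce fragmentation under mixed-size allocations.
 */
static void __maybe_unused genalloc_best_fit_demo(struct gen_pool *pool)
{
	gen_pool_set_algo(pool, gen_pool_best_fit, NULL);
}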
/**
 * gen_pool_first_fit - find the first available region
 * of memory matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 */
unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data)
{
	return bitmap_find_next_zero_area(map, size, start, nr, 0);
}
EXPORT_SYMBOL(gen_pool_first_fit);
/**
 * gen_pool_best_fit - find the best fitting region of memory
 * matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 *
 * Iterate over the bitmap to find the smallest free region
 * that can satisfy the allocation.
 */
unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data)
{
	unsigned long start_bit = size;
	unsigned long len = size + 1;
	unsigned long index;

	index = bitmap_find_next_zero_area(map, size, start, nr, 0);

	while (index < size) {
		int next_bit = find_next_bit(map, size, index + nr);
		if ((next_bit - index) < len) {
			len = next_bit - index;
			start_bit = index;
			/* An exact fit cannot be beaten; stop searching. */
			if (len == nr)
				return start_bit;
		}
		index = bitmap_find_next_zero_area(map, size,
						   next_bit + 1, nr, 0);
	}

	return start_bit;
}
EXPORT_SYMBOL(gen_pool_best_fit);
static void devm_gen_pool_release(struct device *dev, void *res)
{
	gen_pool_destroy(*(struct gen_pool **)res);
}
/**
 * devm_gen_pool_create - managed gen_pool_create
 * @dev: device that provides the gen_pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface. The pool will be
 * automatically destroyed by the device management code.
 */
struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
		int nid)
{
	struct gen_pool **ptr, *pool;

	ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = gen_pool_create(min_alloc_order, nid);
	if (pool) {
		*ptr = pool;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return pool;
}
EXPORT_SYMBOL(devm_gen_pool_create);
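/*
 * Illustrative sketch (not part of the original file): a probe path
 * using the managed variant, so the pool is torn down automatically on
 * driver detach. The order and chunk parameters are hypothetical.
 */
static int __maybe_unused genalloc_devm_demo(struct device *dev)
{
	struct gen_pool *pool;

	pool = devm_gen_pool_create(dev, 5, dev_to_node(dev));
	if (!pool)
		return -ENOMEM;

	/* Seed it with a hypothetical pre-mapped region, as earlier. */
	return gen_pool_add_virt(pool, 0xf0000000UL, 0x40000000, 0x10000, -1);
}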
/**
 * dev_get_gen_pool - Obtain the gen_pool (if any) for a device
 * @dev: device to retrieve the gen_pool from
 *
 * Returns the gen_pool for the device if one is present, or NULL.
 */
struct gen_pool *dev_get_gen_pool(struct device *dev)
{
	struct gen_pool **p = devres_find(dev, devm_gen_pool_release, NULL,
					  NULL);

	if (!p)
		return NULL;
	return *p;
}
EXPORT_SYMBOL_GPL(dev_get_gen_pool);
#ifdef CONFIG_OF
/**
 * of_get_named_gen_pool - find a pool by phandle property
 * @np: device node
 * @propname: property name containing phandle(s)
 * @index: index into the phandle array
 *
 * Returns the pool that contains the chunk starting at the physical
 * address of the device tree node pointed at by the phandle property,
 * or NULL if not found.
 */
struct gen_pool *of_get_named_gen_pool(struct device_node *np,
	const char *propname, int index)
{
	struct platform_device *pdev;
	struct device_node *np_pool;

	np_pool = of_parse_phandle(np, propname, index);
	if (!np_pool)
		return NULL;
	pdev = of_find_device_by_node(np_pool);
	if (!pdev)
		return NULL;
	return dev_get_gen_pool(&pdev->dev);
}
EXPORT_SYMBOL_GPL(of_get_named_gen_pool);
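/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * device tree layout this helper can resolve. A consumer node points at
 * the pool provider's node through a phandle property; the property
 * name is chosen by the caller's binding, "sram-pool" is made up here:
 *
 *	sram: sram@40000000 {
 *		compatible = "vendor,sram";	// hypothetical binding
 *		reg = <0x40000000 0x10000>;
 *	};
 *
 *	codec@48000000 {
 *		sram-pool = <&sram>;
 *	};
 *
 * The consumer's driver would then call:
 *
 *	pool = of_get_named_gen_pool(np, "sram-pool", 0);
 */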
#endif /* CONFIG_OF */