/*
 * Basic general purpose allocator for managing special purpose
 * memory, for example, memory that is not managed by the regular
 * kmalloc/kfree interface. Uses for this include on-device special
 * memory, uncached memory etc.
 *
 * It is safe to use the allocator in NMI handlers and other special
 * unblockable contexts that could otherwise deadlock on locks. This
 * is implemented by using atomic operations and retries on any
 * conflicts. The disadvantage is that there may be livelocks in
 * extreme cases. For better scalability, one allocator can be used
 * for each CPU.
 *
 * The lockless operation only works if there is enough memory
 * available. If new memory is added to the pool, a lock still has
 * to be taken. So any user relying on locklessness has to ensure
 * that sufficient memory is preallocated.
 *
 * The basic atomic operation of this allocator is cmpxchg on long.
 * On architectures that don't have an NMI-safe cmpxchg
 * implementation, the allocator can NOT be used in NMI handlers.
 * Code that uses the allocator in NMI handlers should therefore
 * depend on CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
 *
 * A minimal usage sketch is given after the #include block below.
 *
 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
 * Copyright (C) 2014, NVIDIA CORPORATION. All rights reserved.
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/genalloc.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <asm/relaxed.h>
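
/*
 * Minimal usage sketch (illustrative only, not part of the original
 * allocator): create a pool, preallocate backing memory into it while it
 * is still safe to take locks, and only then allocate and free from
 * atomic context. The backing buffer, the sizes and the
 * genalloc_usage_sketch name are hypothetical.
 */
static int __maybe_unused genalloc_usage_sketch(void)
{
	static unsigned long backing[1024];	/* example backing storage */
	struct gen_pool *pool;
	unsigned long vaddr;

	/* Each bitmap bit represents 2^5 = 32 bytes; allocate the pool on any node. */
	pool = gen_pool_create(5, -1);
	if (!pool)
		return -ENOMEM;

	/* Adding memory takes the pool lock, so do it before any lockless user runs. */
	if (gen_pool_add_virt(pool, (unsigned long)backing, -1, sizeof(backing), -1)) {
		gen_pool_destroy(pool);
		return -ENOMEM;
	}

	/* Allocations and frees are lockless and may be issued from atomic context. */
	vaddr = gen_pool_alloc_addr(pool, 256, 0);
	if (vaddr)
		gen_pool_free(pool, vaddr, 256);

	gen_pool_destroy(pool);
	return 0;
}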
static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
{
	return chunk->end_addr - chunk->start_addr + 1;
}
static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
{
	unsigned long val, nval;

	val = cpu_relaxed_read_long(addr);
	if (val & mask_to_set)
		return -EBUSY;

	while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val) {
		cpu_relaxed_read_long(addr);
		val = nval;
		if (val & mask_to_set)
			return -EBUSY;
	}

	return 0;
}
static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
{
	unsigned long val, nval;

	val = cpu_relaxed_read_long(addr);
	if ((val & mask_to_clear) != mask_to_clear)
		return -EBUSY;

	while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val) {
		cpu_relaxed_read_long(addr);
		val = nval;
		if ((val & mask_to_clear) != mask_to_clear)
			return -EBUSY;
	}

	return 0;
}
/**
 * bitmap_set_ll - set the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to set
 *
 * Set @nr bits starting from @start in @map locklessly. Several users
 * can set/clear the same bitmap simultaneously without a lock. If two
 * users set the same bit, one user will return the number of remaining
 * bits, otherwise 0 is returned.
 */
static int bitmap_set_ll(unsigned long *map, int start, int nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const int size = start + nr;
	int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);

	while (nr - bits_to_set >= 0) {
		if (set_bits_ll(p, mask_to_set))
			return nr;
		nr -= bits_to_set;
		bits_to_set = BITS_PER_LONG;
		mask_to_set = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_set &= BITMAP_LAST_WORD_MASK(size);
		if (set_bits_ll(p, mask_to_set))
			return nr;
	}

	return 0;
}
/**
 * bitmap_clear_ll - clear the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to clear
 *
 * Clear @nr bits starting from @start in @map locklessly. Several users
 * can set/clear the same bitmap simultaneously without a lock. If two
 * users clear the same bit, one user will return the number of remaining
 * bits, otherwise 0 is returned.
 */
static int bitmap_clear_ll(unsigned long *map, int start, int nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const int size = start + nr;
	int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);

	while (nr - bits_to_clear >= 0) {
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
		nr -= bits_to_clear;
		bits_to_clear = BITS_PER_LONG;
		mask_to_clear = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
	}

	return 0;
}
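
/*
 * Illustrative sketch (not part of the original file): how the lockless
 * bitmap helpers above split a range that crosses a long-word boundary.
 * The demo_bitmap_ll_split name and the literal counts are hypothetical.
 */
static void __maybe_unused demo_bitmap_ll_split(void)
{
	unsigned long demo_map[2] = { 0, 0 };	/* two-word bitmap, all clear */
	int start = BITS_PER_LONG - 4;		/* last 4 bits of word 0 */

	/*
	 * Setting 8 bits here touches two words: the first 4 bits are set
	 * in word 0 via BITMAP_FIRST_WORD_MASK(), the remaining 4 bits in
	 * word 1 via BITMAP_LAST_WORD_MASK(). A nonzero return value
	 * reports how many bits could not be set because another user
	 * already owned one of them.
	 */
	if (bitmap_set_ll(demo_map, start, 8))
		return;		/* conflict: some bit was already set */

	/* Clearing uses the same word-by-word decomposition. */
	bitmap_clear_ll(demo_map, start, 8);
}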
/**
 * gen_pool_create - create a new special memory pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.
 */
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
	struct gen_pool *pool;

	pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
	if (pool != NULL) {
		spin_lock_init(&pool->lock);
		INIT_LIST_HEAD(&pool->chunks);
		pool->min_alloc_order = min_alloc_order;
		pool->algo = gen_pool_first_fit;
		pool->data = NULL;
	}
	return pool;
}
EXPORT_SYMBOL(gen_pool_create);
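
/*
 * Illustrative sketch (not part of the original file): the effect of
 * @min_alloc_order. With order 5 every bitmap bit stands for 32 bytes,
 * so even a 1-byte request consumes one full 32-byte unit. The
 * example_order_granularity name is hypothetical.
 */
static size_t __maybe_unused example_order_granularity(size_t request)
{
	int order = 5;						/* 2^5 = 32-byte granularity */
	size_t nbits = (request + (1UL << order) - 1) >> order;	/* round up to whole units */

	return nbits << order;					/* bytes actually consumed */
}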
/**
 * gen_pool_add_virt - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @virt: virtual starting address of memory chunk to add to pool
 * @phys: physical starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 *
 * Add a new chunk of special memory to the specified pool.
 *
 * Returns 0 on success or a -ve errno on failure.
 */
int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
		 size_t size, int nid)
{
	struct gen_pool_chunk *chunk;
	int nbits = size >> pool->min_alloc_order;
	int nbytes = sizeof(struct gen_pool_chunk) +
				BITS_TO_LONGS(nbits) * sizeof(long);

	chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid);
	if (unlikely(chunk == NULL))
		return -ENOMEM;

	chunk->phys_addr = phys;
	chunk->start_addr = virt;
	chunk->end_addr = virt + size - 1;
	atomic_set(&chunk->avail, size);

	spin_lock(&pool->lock);
	list_add_rcu(&chunk->next_chunk, &pool->chunks);
	spin_unlock(&pool->lock);

	return 0;
}
EXPORT_SYMBOL(gen_pool_add_virt);
/**
 * gen_pool_virt_to_phys - return the physical address of memory
 * @pool: pool to allocate from
 * @addr: starting address of memory
 *
 * Returns the physical address on success, or -1 on error.
 */
phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
{
	struct gen_pool_chunk *chunk;
	phys_addr_t paddr = -1;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
			paddr = chunk->phys_addr + (addr - chunk->start_addr);
			break;
		}
	}
	rcu_read_unlock();

	return paddr;
}
EXPORT_SYMBOL(gen_pool_virt_to_phys);
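
/*
 * Illustrative sketch (not part of the original file): registering a
 * chunk together with its physical address so that allocations can be
 * handed to a DMA engine. The example_alloc_with_phys name and the
 * sram_* parameters are hypothetical; a real driver would obtain them
 * from its resources.
 */
static unsigned long __maybe_unused example_alloc_with_phys(struct gen_pool *pool,
		void __iomem *sram_virt, phys_addr_t sram_phys, size_t sram_size)
{
	unsigned long vaddr;
	phys_addr_t paddr;

	/* Record the virt<->phys relationship when the chunk is added. */
	if (gen_pool_add_virt(pool, (unsigned long)sram_virt, sram_phys,
			      sram_size, -1))
		return 0;

	vaddr = gen_pool_alloc_addr(pool, 64, 0);
	if (!vaddr)
		return 0;

	/* Translate the allocation back to a physical address. */
	paddr = gen_pool_virt_to_phys(pool, vaddr);
	if (paddr == (phys_addr_t)-1)
		return 0;

	return vaddr;
}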
/**
 * gen_pool_destroy - destroy a special memory pool
 * @pool: pool to destroy
 *
 * Destroy the specified special memory pool. Verifies that there are no
 * outstanding allocations.
 */
void gen_pool_destroy(struct gen_pool *pool)
{
	struct list_head *_chunk, *_next_chunk;
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	int bit, end_bit;

	list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
		list_del(&chunk->next_chunk);

		end_bit = chunk_size(chunk) >> order;
		bit = find_next_bit(chunk->bits, end_bit, 0);
		BUG_ON(bit < end_bit);

		kfree(chunk);
	}
	kfree(pool);
}
EXPORT_SYMBOL(gen_pool_destroy);
/**
 * gen_pool_alloc_addr - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @alloc_addr: if non-zero, allocate starting at @alloc_addr
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Cannot be used in NMI handlers on architectures without an
 * NMI-safe cmpxchg implementation.
 */
unsigned long gen_pool_alloc_addr(struct gen_pool *pool, size_t size,
		unsigned long alloc_addr)
{
	struct gen_pool_chunk *chunk;
	unsigned long addr = 0;
	int order = pool->min_alloc_order;
	int nbits, start_bit = 0, end_bit, remain;
	int alloc_bit_needed = 0;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	if (size == 0)
		return 0;

	if (alloc_addr & ((1UL << order) - 1))
		return 0;

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (size > atomic_read(&chunk->avail))
			continue;

		end_bit = chunk_size(chunk) >> order;
		if (alloc_addr) {
			if (alloc_addr < chunk->start_addr ||
			    alloc_addr >= chunk->end_addr)
				continue;
			if (alloc_addr + size > chunk->end_addr)
				break;
			alloc_bit_needed = start_bit =
				(alloc_addr - chunk->start_addr) >> order;
		}
retry:
		start_bit = pool->algo(chunk->bits, end_bit, start_bit, nbits,
				pool->data);
		if (alloc_addr && alloc_bit_needed != start_bit)
			break;
		if (start_bit >= end_bit)
			continue;
		remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
		if (remain) {
			remain = bitmap_clear_ll(chunk->bits, start_bit,
						 nbits - remain);
			BUG_ON(remain);
			goto retry;
		}

		addr = chunk->start_addr + ((unsigned long)start_bit << order);
		size = nbits << order;
		atomic_sub(size, &chunk->avail);
		break;
	}
	rcu_read_unlock();
	return addr;
}
EXPORT_SYMBOL(gen_pool_alloc_addr);
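
/*
 * Illustrative sketch (not part of the original file): using the
 * @alloc_addr extension to claim a specific region, e.g. a firmware
 * mandated offset inside a chunk. The address must be aligned to the
 * pool's minimum allocation unit, otherwise 0 is returned. The
 * example_fixed_alloc name is hypothetical.
 */
static int __maybe_unused example_fixed_alloc(struct gen_pool *pool,
		unsigned long fixed_addr, size_t len)
{
	unsigned long addr;

	/* Ask for the exact address; 0 means the region was not available. */
	addr = gen_pool_alloc_addr(pool, len, fixed_addr);
	if (!addr)
		return -ENOMEM;

	/* The call only succeeds if the allocation could start at fixed_addr. */
	gen_pool_free(pool, addr, len);
	return 0;
}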
/**
 * gen_pool_free - free allocated special memory back to the pool
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free
 *
 * Free previously allocated special memory back to the specified
 * pool. Cannot be used in NMI handlers on architectures without an
 * NMI-safe cmpxchg implementation.
 */
void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
{
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	int start_bit, nbits, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
			BUG_ON(addr + size - 1 > chunk->end_addr);
			start_bit = (addr - chunk->start_addr) >> order;
			remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
			BUG_ON(remain);
			size = nbits << order;
			atomic_add(size, &chunk->avail);
			rcu_read_unlock();
			return;
		}
	}
	rcu_read_unlock();
	BUG();
}
EXPORT_SYMBOL(gen_pool_free);
/**
 * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
 * @pool: the generic memory pool
 * @func: func to call
 * @data: additional data used by @func
 *
 * Call @func for every chunk of the generic memory pool. The @func is
 * called with rcu_read_lock held.
 */
void gen_pool_for_each_chunk(struct gen_pool *pool,
	void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
	void *data)
{
	struct gen_pool_chunk *chunk;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk)
		func(pool, chunk, data);
	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_for_each_chunk);
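
/*
 * Illustrative sketch (not part of the original file): a callback for
 * gen_pool_for_each_chunk(). It runs under rcu_read_lock, so it must not
 * sleep. The example_count_chunk name is hypothetical.
 */
static void __maybe_unused example_count_chunk(struct gen_pool *pool,
		struct gen_pool_chunk *chunk, void *data)
{
	size_t *total = data;

	/* Accumulate the size of every chunk into *total. */
	*total += chunk_size(chunk);
}

/* Usage: gen_pool_for_each_chunk(pool, example_count_chunk, &total); */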
/**
 * gen_pool_avail - get available free space of the pool
 * @pool: pool to get available free space of
 *
 * Return available free space of the specified pool.
 */
size_t gen_pool_avail(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t avail = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		avail += atomic_read(&chunk->avail);
	rcu_read_unlock();
	return avail;
}
EXPORT_SYMBOL_GPL(gen_pool_avail);

/**
 * gen_pool_size - get size in bytes of memory managed by the pool
 * @pool: pool to get size of
 *
 * Return size in bytes of memory managed by the pool.
 */
size_t gen_pool_size(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t size = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		size += chunk_size(chunk);
	rcu_read_unlock();
	return size;
}
EXPORT_SYMBOL_GPL(gen_pool_size);
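
/*
 * Illustrative sketch (not part of the original file): the number of
 * bytes currently handed out can be derived from the two accessors
 * above. The example_pool_used name is hypothetical.
 */
static size_t __maybe_unused example_pool_used(struct gen_pool *pool)
{
	/* Managed bytes minus free bytes gives the allocated bytes. */
	return gen_pool_size(pool) - gen_pool_avail(pool);
}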
/**
 * gen_pool_set_algo - set the allocation algorithm
 * @pool: pool to change allocation algorithm of
 * @algo: custom algorithm function
 * @data: additional data used by @algo
 *
 * Call @algo for each memory allocation in the pool.
 * If @algo is NULL use gen_pool_first_fit as default
 * memory allocation function.
 */
void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data)
{
	rcu_read_lock();

	pool->algo = algo;
	if (!pool->algo)
		pool->algo = gen_pool_first_fit;

	pool->data = data;

	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_set_algo);
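
/*
 * Illustrative sketch (not part of the original file): switching a pool
 * from the default first-fit policy to best-fit. This should be done
 * before allocations are issued from lockless contexts. The
 * example_use_best_fit name is hypothetical.
 */
static void __maybe_unused example_use_best_fit(struct gen_pool *pool)
{
	/* NULL data: gen_pool_best_fit does not use the private pointer. */
	gen_pool_set_algo(pool, gen_pool_best_fit, NULL);
}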
/**
 * gen_pool_first_fit - find the first available region
 * of memory matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 */
unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data)
{
	return bitmap_find_next_zero_area(map, size, start, nr, 0);
}
EXPORT_SYMBOL(gen_pool_first_fit);
/**
 * gen_pool_best_fit - find the best fitting region of memory
 * matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 *
 * Iterate over the bitmap to find the smallest free region
 * from which the requested memory can be allocated.
 */
unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data)
{
	unsigned long start_bit = size;
	unsigned long len = size + 1;
	unsigned long index;

	index = bitmap_find_next_zero_area(map, size, start, nr, 0);

	while (index < size) {
		int next_bit = find_next_bit(map, size, index + nr);
		if ((next_bit - index) < len) {
			len = next_bit - index;
			start_bit = index;
			if (len == nr)
				break;
		}
		index = bitmap_find_next_zero_area(map, size,
						   next_bit + 1, nr, 0);
	}

	return start_bit;
}
EXPORT_SYMBOL(gen_pool_best_fit);
static void devm_gen_pool_release(struct device *dev, void *res)
{
	gen_pool_destroy(*(struct gen_pool **)res);
}

/**
 * devm_gen_pool_create - managed gen_pool_create
 * @dev: device that provides the gen_pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface. The pool will be
 * automatically destroyed by the device management code.
 */
struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
		int nid)
{
	struct gen_pool **ptr, *pool;

	ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = gen_pool_create(min_alloc_order, nid);
	if (pool) {
		*ptr = pool;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return pool;
}
/**
 * dev_get_gen_pool - Obtain the gen_pool (if any) for a device
 * @dev: device to retrieve the gen_pool from
 *
 * Returns the gen_pool for the device if one is present, or NULL.
 */
struct gen_pool *dev_get_gen_pool(struct device *dev)
{
	struct gen_pool **p = devres_find(dev, devm_gen_pool_release, NULL,
					NULL);

	if (!p)
		return NULL;
	return *p;
}
EXPORT_SYMBOL_GPL(dev_get_gen_pool);
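
/*
 * Illustrative sketch (not part of the original file): a driver probe
 * path using the managed variant, so the pool is destroyed automatically
 * on driver detach. The example_probe_pool name, the order and the
 * 4096-byte region are hypothetical.
 */
static int __maybe_unused example_probe_pool(struct device *dev,
		void __iomem *base, phys_addr_t phys)
{
	struct gen_pool *pool;

	pool = devm_gen_pool_create(dev, 5, dev_to_node(dev));
	if (!pool)
		return -ENOMEM;

	/* Other code can later look the pool up again via dev_get_gen_pool(). */
	return gen_pool_add_virt(pool, (unsigned long)base, phys, 4096, -1);
}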
#ifdef CONFIG_OF
/**
 * of_get_named_gen_pool - find a pool by phandle property
 * @np: device node
 * @propname: property name containing phandle(s)
 * @index: index into the phandle array
 *
 * Returns the pool that contains the chunk starting at the physical
 * address of the device tree node pointed at by the phandle property,
 * or NULL if not found.
 */
struct gen_pool *of_get_named_gen_pool(struct device_node *np,
	const char *propname, int index)
{
	struct platform_device *pdev;
	struct device_node *np_pool;

	np_pool = of_parse_phandle(np, propname, index);
	if (!np_pool)
		return NULL;
	pdev = of_find_device_by_node(np_pool);
	if (!pdev)
		return NULL;
	return dev_get_gen_pool(&pdev->dev);
}
EXPORT_SYMBOL_GPL(of_get_named_gen_pool);
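
/*
 * Illustrative sketch (not part of the original file): a consumer node
 * can point at a pool provider with a phandle property, for example
 * (all node and property names hypothetical):
 *
 *	sram: sram@40000000 { ... };
 *
 *	codec@1,0 {
 *		iram = <&sram>;
 *	};
 *
 * and the driver then resolves the pool with:
 *
 *	pool = of_get_named_gen_pool(pdev->dev.of_node, "iram", 0);
 */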
#endif /* CONFIG_OF */