drm: mm: track free areas implicitly
[linux-2.6.git] drivers/gpu/drm/drm_mm.c
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
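
/*
 * Illustrative usage sketch (not part of this file): allocation goes through
 * a search for a suitable hole followed by a get, freeing goes through put.
 * The drm_mm functions named below are the ones exported by this file; the
 * sizes and variables are made up.
 *
 *	struct drm_mm mm;
 *	struct drm_mm_node *hole, *node;
 *
 *	drm_mm_init(&mm, 0, 1024 * 1024);
 *	hole = drm_mm_search_free(&mm, 4096, 0, 0);
 *	if (hole)
 *		node = drm_mm_get_block_generic(hole, 4096, 0, 0);
 *	...
 *	drm_mm_put_block(node);
 *	drm_mm_takedown(&mm);
 */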

#include "drmP.h"
#include "drm_mm.h"
#include <linux/slab.h>
#include <linux/seq_file.h>

#define MM_UNUSED_TARGET 4

static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *child;

	if (atomic)
		child = kzalloc(sizeof(*child), GFP_ATOMIC);
	else
		child = kzalloc(sizeof(*child), GFP_KERNEL);

	if (unlikely(child == NULL)) {
		spin_lock(&mm->unused_lock);
		if (list_empty(&mm->unused_nodes))
			child = NULL;
		else {
			child =
			    list_entry(mm->unused_nodes.next,
				       struct drm_mm_node, node_list);
			list_del(&child->node_list);
			--mm->num_unused;
		}
		spin_unlock(&mm->unused_lock);
	}
	return child;
}

/* drm_mm_pre_get() - pre-allocate drm_mm_node structure
 * drm_mm:	memory manager struct we are pre-allocating for
 *
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	spin_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		spin_unlock(&mm->unused_lock);
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		spin_lock(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			spin_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->node_list, &mm->unused_nodes);
	}
	spin_unlock(&mm->unused_lock);
	return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);

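/*
 * Free space is tracked implicitly: allocated nodes sit on an ordered
 * node_list, and the hole (if any) that follows a node simply spans from the
 * end of that node to the start of the next one. Nodes with hole_follows set
 * are additionally linked into mm->hole_stack, so free space can be found
 * without walking every allocated node.
 */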
static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
	return hole_node->start + hole_node->size;
}

static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
	struct drm_mm_node *next_node =
		list_entry(hole_node->node_list.next, struct drm_mm_node,
			   node_list);

	return next_node->start;
}

struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
					     unsigned long size,
					     unsigned alignment,
					     int atomic)
{

	struct drm_mm_node *node;
	struct drm_mm *mm = hole_node->mm;
	unsigned long tmp = 0, wasted = 0;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);

	BUG_ON(!hole_node->hole_follows);

	node = drm_mm_kmalloc(mm, atomic);
	if (unlikely(node == NULL))
		return NULL;

	if (alignment)
		tmp = hole_start % alignment;

	if (!tmp) {
		hole_node->hole_follows = 0;
		list_del_init(&hole_node->hole_stack);
	} else
		wasted = alignment - tmp;
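	/*
	 * Note: when alignment pushes the allocation past hole_start, the
	 * skipped space is not lost. hole_node keeps hole_follows set and
	 * stays on the hole stack, so it still owns the (now smaller) hole
	 * in front of the new node.
	 */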

	node->start = hole_start + wasted;
	node->size = size;
	node->mm = mm;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > hole_end);

	if (node->start + node->size < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	} else {
		node->hole_follows = 0;
	}

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);

struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
						unsigned long size,
						unsigned alignment,
						unsigned long start,
						unsigned long end,
						int atomic)
{
	struct drm_mm_node *node;
	struct drm_mm *mm = hole_node->mm;
	unsigned long tmp = 0, wasted = 0;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);

	BUG_ON(!hole_node->hole_follows);

	node = drm_mm_kmalloc(mm, atomic);
	if (unlikely(node == NULL))
		return NULL;

	if (hole_start < start)
		wasted += start - hole_start;
	if (alignment)
		tmp = (hole_start + wasted) % alignment;

	if (tmp)
		wasted += alignment - tmp;

	if (!wasted) {
		hole_node->hole_follows = 0;
		list_del_init(&hole_node->hole_stack);
	}

	node->start = hole_start + wasted;
	node->size = size;
	node->mm = mm;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > hole_end);
	BUG_ON(node->start + node->size > end);

	if (node->start + node->size < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	} else {
		node->hole_follows = 0;
	}

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);

/*
 * Remove a node and give its space back to the hole tracking. The freed range
 * simply becomes (part of) the hole following the previous node, and that
 * node's hole is (re)inserted at the front of the hole stack.
 */

void drm_mm_put_block(struct drm_mm_node *node)
{

	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	BUG_ON(node->scanned_block || node->scanned_prev_free
				   || node->scanned_next_free);

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	if (node->hole_follows) {
		BUG_ON(drm_mm_hole_node_start(node)
				== drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else
		BUG_ON(drm_mm_hole_node_start(node)
				!= drm_mm_hole_node_end(node));

	if (!prev_node->hole_follows) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	list_del(&node->node_list);
	spin_lock(&mm->unused_lock);
	if (mm->num_unused < MM_UNUSED_TARGET) {
		list_add(&node->node_list, &mm->unused_nodes);
		++mm->num_unused;
	} else
		kfree(node);
	spin_unlock(&mm->unused_lock);
}
EXPORT_SYMBOL(drm_mm_put_block);

static int check_free_hole(unsigned long start, unsigned long end,
			   unsigned long size, unsigned alignment)
{
	unsigned wasted = 0;

	if (end - start < size)
		return 0;

	if (alignment) {
		unsigned tmp = start % alignment;
		if (tmp)
			wasted = alignment - tmp;
	}

	if (end >= start + size + wasted) {
		return 1;
	}

	return 0;
}

struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
				       unsigned long size,
				       unsigned alignment, int best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
		BUG_ON(!entry->hole_follows);
		if (!check_free_hole(drm_mm_hole_node_start(entry),
				     drm_mm_hole_node_end(entry),
				     size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free);

struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
						unsigned long size,
						unsigned alignment,
						unsigned long start,
						unsigned long end,
						int best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
		unsigned long adj_start = drm_mm_hole_node_start(entry) < start ?
			start : drm_mm_hole_node_start(entry);
		unsigned long adj_end = drm_mm_hole_node_end(entry) > end ?
			end : drm_mm_hole_node_end(entry);

		BUG_ON(!entry->hole_follows);
		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range);

/**
 * Initialize lru scanning.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
		      unsigned alignment)
{
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_size = 0;
	mm->scan_check_range = 0;
}
EXPORT_SYMBOL(drm_mm_init_scan);

/**
 * Initialize lru scanning.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole. This version is for range-restricted scans.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
				 unsigned alignment,
				 unsigned long start,
				 unsigned long end)
{
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_size = 0;
	mm->scan_start = start;
	mm->scan_end = end;
	mm->scan_check_range = 1;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);

/**
 * Add a node to the scan list that might be freed to make space for the desired
 * hole.
 *
 * Returns non-zero if a hole has been found, zero otherwise.
 */
int drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;
	unsigned long hole_start, hole_end;
	unsigned long adj_start;
	unsigned long adj_end;

	mm->scanned_blocks++;

	BUG_ON(node->scanned_block);
	node->scanned_block = 1;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	node->scanned_preceeds_hole = prev_node->hole_follows;
	prev_node->hole_follows = 1;
	list_del(&node->node_list);
	node->node_list.prev = &prev_node->node_list;

	hole_start = drm_mm_hole_node_start(prev_node);
	hole_end = drm_mm_hole_node_end(prev_node);
	if (mm->scan_check_range) {
		adj_start = hole_start < mm->scan_start ?
			mm->scan_start : hole_start;
		adj_end = hole_end > mm->scan_end ?
			mm->scan_end : hole_end;
	} else {
		adj_start = hole_start;
		adj_end = hole_end;
	}

	if (check_free_hole(adj_start, adj_end,
			    mm->scan_size, mm->scan_alignment)) {
		mm->scan_hit_start = hole_start;
		mm->scan_hit_size = hole_end;

		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * Remove a node from the scan list.
 *
 * Nodes _must_ be removed in the exact same order from the scan list as they
 * have been added, otherwise the internal state of the memory manager will be
 * corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with best_match = 0 will then
 * return the just freed block (because it is at the top of the hole_stack
 * list).
 *
 * Returns one if this block should be evicted, zero otherwise. Will always
 * return zero when no hole has been found.
 */
int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	mm->scanned_blocks--;

	BUG_ON(!node->scanned_block);
	node->scanned_block = 0;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	prev_node->hole_follows = node->scanned_preceeds_hole;
	INIT_LIST_HEAD(&node->node_list);
	list_add(&node->node_list, &prev_node->node_list);

	/* Only need to check for containment because start & size for the
	 * complete resulting free block (not just the desired part) are
	 * stored. */
	if (node->start >= mm->scan_hit_start &&
	    node->start + node->size
			<= mm->scan_hit_start + mm->scan_hit_size) {
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);
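
/*
 * Illustrative eviction-scan sketch (not part of this file): the caller walks
 * its own LRU list, adds candidate nodes until a hole is reported, then removes
 * every scanned node again, honouring the ordering rule documented above, and
 * evicts those for which drm_mm_scan_remove_block() returns 1. The object list
 * and member names here are made up.
 *
 *	drm_mm_init_scan(&mm, size, alignment);
 *	list_for_each_entry(obj, &lru_list, lru_link) {
 *		if (drm_mm_scan_add_block(obj->mm_node))
 *			break;
 *	}
 *	... walk the scanned objects again, calling drm_mm_scan_remove_block()
 *	... on each and drm_mm_put_block() on those that should be evicted.
 */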

int drm_mm_clean(struct drm_mm * mm)
{
	struct list_head *head = &mm->head_node.node_list;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->hole_stack);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	mm->scanned_blocks = 0;
	spin_lock_init(&mm->unused_lock);

	/* Clever trick to avoid a special case in the free hole tracking. */
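	/*
	 * head_node.start points at the end of the managed range and its size
	 * wraps back around to the range start, so drm_mm_hole_node_start()
	 * of the head node evaluates to 'start' and the initial hole covers
	 * the whole range [start, start + size) without a dedicated first node.
	 */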
	INIT_LIST_HEAD(&mm->head_node.node_list);
	INIT_LIST_HEAD(&mm->head_node.hole_stack);
	mm->head_node.hole_follows = 1;
	mm->head_node.scanned_block = 0;
	mm->head_node.scanned_prev_free = 0;
	mm->head_node.scanned_next_free = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	return 0;
}
EXPORT_SYMBOL(drm_mm_init);

void drm_mm_takedown(struct drm_mm * mm)
{
	struct drm_mm_node *entry, *next;

	if (!list_empty(&mm->head_node.node_list)) {
		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
		return;
	}

	spin_lock(&mm->unused_lock);
	list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
		list_del(&entry->node_list);
		kfree(entry);
		--mm->num_unused;
	}
	spin_unlock(&mm->unused_lock);

	BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);

void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;
	unsigned long hole_start, hole_end, hole_size;

	hole_start = drm_mm_hole_node_start(&mm->head_node);
	hole_end = drm_mm_hole_node_end(&mm->head_node);
	hole_size = hole_end - hole_start;
	if (hole_size)
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
			prefix, hole_start, hole_end,
			hole_size);
	total_free += hole_size;

	drm_mm_for_each_node(entry, mm) {
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
			prefix, entry->start, entry->start + entry->size,
			entry->size);
		total_used += entry->size;

		if (entry->hole_follows) {
			hole_start = drm_mm_hole_node_start(entry);
			hole_end = drm_mm_hole_node_end(entry);
			hole_size = hole_end - hole_start;
			printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
				prefix, hole_start, hole_end,
				hole_size);
			total_free += hole_size;
		}
	}
	total = total_free + total_used;

	printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
		total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;
	unsigned long hole_start, hole_end, hole_size;

	hole_start = drm_mm_hole_node_start(&mm->head_node);
	hole_end = drm_mm_hole_node_end(&mm->head_node);
	hole_size = hole_end - hole_start;
	if (hole_size)
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
				hole_start, hole_end, hole_size);
	total_free += hole_size;

	drm_mm_for_each_node(entry, mm) {
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
				entry->start, entry->start + entry->size,
				entry->size);
		total_used += entry->size;
		if (entry->hole_follows) {
			hole_start = drm_mm_hole_node_start(entry);
			hole_end = drm_mm_hole_node_end(entry);
			hole_size = hole_end - hole_start;
			seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
					hole_start, hole_end, hole_size);
			total_free += hole_size;
		}
	}
	total = total_free + total_used;

	seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif