drm: Memory fragmentation from lost alignment blocks
[linux-2.6.git] drivers/gpu/drm/drm_mm.c
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if heavy fragmentation is expected.
 *
 * Aligned allocations could also be improved.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
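
/*
 * A minimal usage sketch (illustrative only, not part of this file; the
 * range, the sizes and the absence of locking are assumptions):
 *
 *      struct drm_mm mm;
 *      struct drm_mm_node *hole, *node;
 *
 *      drm_mm_init(&mm, 0, 1024 * 1024);
 *      hole = drm_mm_search_free(&mm, 4096, 0, 0);
 *      if (hole)
 *              node = drm_mm_get_block(hole, 4096, 0);
 *      ...
 *      drm_mm_put_block(node);
 *      drm_mm_takedown(&mm);
 */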

#include "drmP.h"
#include "drm_mm.h"
#include <linux/slab.h>

#define MM_UNUSED_TARGET 4

/* Return the size of the free block at the tail of the managed range, or 0. */
unsigned long drm_mm_tail_space(struct drm_mm *mm)
{
        struct list_head *tail_node;
        struct drm_mm_node *entry;

        tail_node = mm->ml_entry.prev;
        entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
        if (!entry->free)
                return 0;

        return entry->size;
}

/*
 * Shrink the free block at the tail by @size. Fails unless the tail block
 * is free and strictly larger than @size.
 */
int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
{
        struct list_head *tail_node;
        struct drm_mm_node *entry;

        tail_node = mm->ml_entry.prev;
        entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
        if (!entry->free)
                return -ENOMEM;

        if (entry->size <= size)
                return -ENOMEM;

        entry->size -= size;
        return 0;
}

/*
 * Allocate a node, falling back to the preallocated unused_nodes cache
 * when kmalloc() fails (see drm_mm_pre_get()).
 */
static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
        struct drm_mm_node *child;

        if (atomic)
                child = kmalloc(sizeof(*child), GFP_ATOMIC);
        else
                child = kmalloc(sizeof(*child), GFP_KERNEL);

        if (unlikely(child == NULL)) {
                spin_lock(&mm->unused_lock);
                if (list_empty(&mm->unused_nodes))
                        child = NULL;
                else {
                        child =
                            list_entry(mm->unused_nodes.next,
                                       struct drm_mm_node, fl_entry);
                        list_del(&child->fl_entry);
                        --mm->num_unused;
                }
                spin_unlock(&mm->unused_lock);
        }
        return child;
}

/*
 * Fill the unused_nodes cache up to MM_UNUSED_TARGET entries so that later
 * atomic allocations can fall back on it. May sleep.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
        struct drm_mm_node *node;

        spin_lock(&mm->unused_lock);
        while (mm->num_unused < MM_UNUSED_TARGET) {
                spin_unlock(&mm->unused_lock);
                node = kmalloc(sizeof(*node), GFP_KERNEL);
                spin_lock(&mm->unused_lock);

                if (unlikely(node == NULL)) {
                        int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
                        spin_unlock(&mm->unused_lock);
                        return ret;
                }
                ++mm->num_unused;
                list_add_tail(&node->fl_entry, &mm->unused_nodes);
        }
        spin_unlock(&mm->unused_lock);
        return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
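
/*
 * A sketch of the intended call pattern for the atomic path (an assumption
 * based on the code above; "driver_lock" is hypothetical). Nodes are
 * preallocated while sleeping is still allowed; drm_mm_get_block_atomic()
 * then falls back on unused_nodes if its GFP_ATOMIC allocation fails:
 *
 *      if (drm_mm_pre_get(&mm))
 *              return -ENOMEM;
 *      spin_lock(&driver_lock);
 *      node = drm_mm_get_block_atomic(hole, size, alignment);
 *      spin_unlock(&driver_lock);
 */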

/* Append a new free block of @size units starting at @start to the map. */
static int drm_mm_create_tail_node(struct drm_mm *mm,
                                   unsigned long start,
                                   unsigned long size, int atomic)
{
        struct drm_mm_node *child;

        child = drm_mm_kmalloc(mm, atomic);
        if (unlikely(child == NULL))
                return -ENOMEM;

        child->free = 1;
        child->size = size;
        child->start = start;
        child->mm = mm;

        list_add_tail(&child->ml_entry, &mm->ml_entry);
        list_add_tail(&child->fl_entry, &mm->fl_entry);

        return 0;
}

/* Grow the managed range by @size, extending the tail block if it is free. */
int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size, int atomic)
{
        struct list_head *tail_node;
        struct drm_mm_node *entry;

        tail_node = mm->ml_entry.prev;
        entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
        if (!entry->free) {
                return drm_mm_create_tail_node(mm, entry->start + entry->size,
                                               size, atomic);
        }
        entry->size += size;
        return 0;
}

/*
 * Split a @size-sized block off the start of @parent and return it marked
 * as allocated; @parent keeps the remainder.
 */
static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
                                                 unsigned long size,
                                                 int atomic)
{
        struct drm_mm_node *child;

        child = drm_mm_kmalloc(parent->mm, atomic);
        if (unlikely(child == NULL))
                return NULL;

        INIT_LIST_HEAD(&child->fl_entry);

        child->free = 0;
        child->size = size;
        child->start = parent->start;
        child->mm = parent->mm;

        list_add_tail(&child->ml_entry, &parent->ml_entry);

        parent->size -= size;
        parent->start += size;
        return child;
}

/*
 * Allocate @size units from the free block @node, splitting off the
 * alignment padding first when @node is not suitably aligned.
 */
struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *node,
                                     unsigned long size, unsigned alignment)
{
        struct drm_mm_node *align_splitoff = NULL;
        unsigned tmp = 0;

        if (alignment)
                tmp = node->start % alignment;

        if (tmp) {
                align_splitoff =
                    drm_mm_split_at_start(node, alignment - tmp, 0);
                if (unlikely(align_splitoff == NULL))
                        return NULL;
        }

        if (node->size == size) {
                list_del_init(&node->fl_entry);
                node->free = 0;
        } else {
                node = drm_mm_split_at_start(node, size, 0);
        }

        /* Hand the padding back so it can merge with free neighbours. */
        if (align_splitoff)
                drm_mm_put_block(align_splitoff);

        return node;
}
EXPORT_SYMBOL(drm_mm_get_block);
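
/*
 * Worked example of the alignment handling above (the numbers are
 * illustrative): with node->start == 3 and alignment == 4, tmp = 3 % 4 = 3,
 * so a block of alignment - tmp = 1 unit is split off the front and the
 * allocation starts at 4. The padding block must always be handed back via
 * drm_mm_put_block(); a path that drops it instead produces exactly the
 * "lost alignment blocks" fragmentation named in the title.
 */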

/*
 * Atomic version of drm_mm_get_block(). The alignment split-off is handed
 * back on every return path; returning early without calling
 * drm_mm_put_block(align_splitoff) would leak the padding block and
 * permanently fragment the managed range.
 */
struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
                                            unsigned long size,
                                            unsigned alignment)
{
        struct drm_mm_node *align_splitoff = NULL;
        struct drm_mm_node *child;
        unsigned tmp = 0;

        if (alignment)
                tmp = parent->start % alignment;

        if (tmp) {
                align_splitoff =
                    drm_mm_split_at_start(parent, alignment - tmp, 1);
                if (unlikely(align_splitoff == NULL))
                        return NULL;
        }

        if (parent->size == size) {
                list_del_init(&parent->fl_entry);
                parent->free = 0;
                child = parent;
        } else {
                child = drm_mm_split_at_start(parent, size, 1);
        }

        if (align_splitoff)
                drm_mm_put_block(align_splitoff);

        return child;
}
EXPORT_SYMBOL(drm_mm_get_block_atomic);

/*
 * Put a block. Merge with the previous and/or next block if they are free;
 * otherwise add it to the free stack. (A worked coalescing example follows
 * the function.)
 */
void drm_mm_put_block(struct drm_mm_node *cur)
{
        struct drm_mm *mm = cur->mm;
        struct list_head *cur_head = &cur->ml_entry;
        struct list_head *root_head = &mm->ml_entry;
        struct drm_mm_node *prev_node = NULL;
        struct drm_mm_node *next_node;

        int merged = 0;

        if (cur_head->prev != root_head) {
                prev_node =
                    list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
                if (prev_node->free) {
                        prev_node->size += cur->size;
                        merged = 1;
                }
        }
        if (cur_head->next != root_head) {
                next_node =
                    list_entry(cur_head->next, struct drm_mm_node, ml_entry);
                if (next_node->free) {
                        if (merged) {
                                prev_node->size += next_node->size;
                                list_del(&next_node->ml_entry);
                                list_del(&next_node->fl_entry);
                                if (mm->num_unused < MM_UNUSED_TARGET) {
                                        list_add(&next_node->fl_entry,
                                                 &mm->unused_nodes);
                                        ++mm->num_unused;
                                } else
                                        kfree(next_node);
                        } else {
                                next_node->size += cur->size;
                                next_node->start = cur->start;
                                merged = 1;
                        }
                }
        }
        if (!merged) {
                cur->free = 1;
                list_add(&cur->fl_entry, &mm->fl_entry);
        } else {
                list_del(&cur->ml_entry);
                if (mm->num_unused < MM_UNUSED_TARGET) {
                        list_add(&cur->fl_entry, &mm->unused_nodes);
                        ++mm->num_unused;
                } else
                        kfree(cur);
        }
}
EXPORT_SYMBOL(drm_mm_put_block);
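
/*
 * Coalescing example (illustrative): calling drm_mm_put_block() on the
 * used block in the middle of
 *
 *      | free 16 | used 8 | free 8 |
 *
 * merges all three regions into a single
 *
 *      | free 32 |
 *
 * and the two surplus struct drm_mm_node entries are either cached on
 * unused_nodes (up to MM_UNUSED_TARGET of them) or kfree()d.
 */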

/*
 * Walk the free stack for a block that can hold @size at @alignment. With
 * @best_match, return the smallest such block instead of the first found.
 */
struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
                                       unsigned long size,
                                       unsigned alignment, int best_match)
{
        struct list_head *list;
        const struct list_head *free_stack = &mm->fl_entry;
        struct drm_mm_node *entry;
        struct drm_mm_node *best;
        unsigned long best_size;
        unsigned wasted;

        best = NULL;
        best_size = ~0UL;

        list_for_each(list, free_stack) {
                entry = list_entry(list, struct drm_mm_node, fl_entry);
                wasted = 0;

                if (entry->size < size)
                        continue;

                if (alignment) {
                        unsigned tmp = entry->start % alignment;
                        if (tmp)
                                wasted += alignment - tmp;
                }

                if (entry->size >= size + wasted) {
                        if (!best_match)
                                return entry;
                        /* Compare the candidate's size, not the requested
                         * size, or best-fit degenerates into last-fit. */
                        if (entry->size < best_size) {
                                best = entry;
                                best_size = entry->size;
                        }
                }
        }

        return best;
}
EXPORT_SYMBOL(drm_mm_search_free);

/* True if nothing is allocated: only the single initial block remains. */
int drm_mm_clean(struct drm_mm *mm)
{
        struct list_head *head = &mm->ml_entry;

        return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

/* Initialize @mm to manage [@start, @start + @size) as one free block. */
int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
        INIT_LIST_HEAD(&mm->ml_entry);
        INIT_LIST_HEAD(&mm->fl_entry);
        INIT_LIST_HEAD(&mm->unused_nodes);
        mm->num_unused = 0;
        spin_lock_init(&mm->unused_lock);

        return drm_mm_create_tail_node(mm, start, size, 0);
}
EXPORT_SYMBOL(drm_mm_init);

/* Tear down @mm; complains and bails out if blocks are still allocated. */
void drm_mm_takedown(struct drm_mm *mm)
{
        struct list_head *bnode = mm->fl_entry.next;
        struct drm_mm_node *entry;
        struct drm_mm_node *next;

        entry = list_entry(bnode, struct drm_mm_node, fl_entry);

        if (entry->ml_entry.next != &mm->ml_entry ||
            entry->fl_entry.next != &mm->fl_entry) {
                DRM_ERROR("Memory manager not clean. Delaying takedown\n");
                return;
        }

        list_del(&entry->fl_entry);
        list_del(&entry->ml_entry);
        kfree(entry);

        spin_lock(&mm->unused_lock);
        list_for_each_entry_safe(entry, next, &mm->unused_nodes, fl_entry) {
                list_del(&entry->fl_entry);
                kfree(entry);
                --mm->num_unused;
        }
        spin_unlock(&mm->unused_lock);

        BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);