drm/ttm: Make sure system buffer objects have offset == 0.
drivers/gpu/drm/ttm/ttm_bo.c
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
/* Notes:
 *
 * We store a bo pointer in the drm_mm_node struct so we know which bo
 * owns a specific node. There is no protection on the pointer, so to
 * make sure things don't go berserk you have to access this pointer
 * while holding the global lru lock, and make sure you reset the
 * pointer to NULL any time you free a node.
 */

#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>

#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
static void ttm_bo_global_kobj_release(struct kobject *kobj);

static struct attribute ttm_bo_count = {
        .name = "bo_count",
        .mode = S_IRUGO
};

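/*
 * Map the lowest set memory-type flag bit in @flags to its memory type
 * index (TTM_PL_SYSTEM .. TTM_PL_PRIV5), or return -EINVAL if none is set.
 */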
static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
{
        int i;

        for (i = 0; i <= TTM_PL_PRIV5; i++)
                if (flags & (1 << i)) {
                        *mem_type = i;
                        return 0;
                }
        return -EINVAL;
}

static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];

        printk(KERN_ERR TTM_PFX "    has_type: %d\n", man->has_type);
        printk(KERN_ERR TTM_PFX "    use_type: %d\n", man->use_type);
        printk(KERN_ERR TTM_PFX "    flags: 0x%08X\n", man->flags);
        printk(KERN_ERR TTM_PFX "    gpu_offset: 0x%08lX\n", man->gpu_offset);
        printk(KERN_ERR TTM_PFX "    io_offset: 0x%08lX\n", man->io_offset);
        printk(KERN_ERR TTM_PFX "    io_size: %ld\n", man->io_size);
        printk(KERN_ERR TTM_PFX "    size: %llu\n", man->size);
        printk(KERN_ERR TTM_PFX "    available_caching: 0x%08X\n",
                man->available_caching);
        printk(KERN_ERR TTM_PFX "    default_caching: 0x%08X\n",
                man->default_caching);
        if (mem_type != TTM_PL_SYSTEM) {
                spin_lock(&bdev->glob->lru_lock);
                drm_mm_debug_table(&man->manager, TTM_PFX);
                spin_unlock(&bdev->glob->lru_lock);
        }
}

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
                                        struct ttm_placement *placement)
{
        int i, ret, mem_type;

        printk(KERN_ERR TTM_PFX "No space for %p (%lu pages, %luK, %luM)\n",
                bo, bo->mem.num_pages, bo->mem.size >> 10,
                bo->mem.size >> 20);
        for (i = 0; i < placement->num_placement; i++) {
                ret = ttm_mem_type_from_flags(placement->placement[i],
                                                &mem_type);
                if (ret)
                        return;
                printk(KERN_ERR TTM_PFX "  placement[%d]=0x%08X (%d)\n",
                        i, placement->placement[i], mem_type);
                ttm_mem_type_debug(bo->bdev, mem_type);
        }
}

static ssize_t ttm_bo_global_show(struct kobject *kobj,
                                  struct attribute *attr,
                                  char *buffer)
{
        struct ttm_bo_global *glob =
                container_of(kobj, struct ttm_bo_global, kobj);

        return snprintf(buffer, PAGE_SIZE, "%lu\n",
                        (unsigned long) atomic_read(&glob->bo_count));
}

static struct attribute *ttm_bo_global_attrs[] = {
        &ttm_bo_count,
        NULL
};

static struct sysfs_ops ttm_bo_global_ops = {
        .show = &ttm_bo_global_show
};

static struct kobj_type ttm_bo_glob_kobj_type = {
        .release = &ttm_bo_global_kobj_release,
        .sysfs_ops = &ttm_bo_global_ops,
        .default_attrs = ttm_bo_global_attrs
};


static inline uint32_t ttm_bo_type_flags(unsigned type)
{
        return 1 << (type);
}

static void ttm_bo_release_list(struct kref *list_kref)
{
        struct ttm_buffer_object *bo =
            container_of(list_kref, struct ttm_buffer_object, list_kref);
        struct ttm_bo_device *bdev = bo->bdev;

        BUG_ON(atomic_read(&bo->list_kref.refcount));
        BUG_ON(atomic_read(&bo->kref.refcount));
        BUG_ON(atomic_read(&bo->cpu_writers));
        BUG_ON(bo->sync_obj != NULL);
        BUG_ON(bo->mem.mm_node != NULL);
        BUG_ON(!list_empty(&bo->lru));
        BUG_ON(!list_empty(&bo->ddestroy));

        if (bo->ttm)
                ttm_tt_destroy(bo->ttm);
        atomic_dec(&bo->glob->bo_count);
        if (bo->destroy)
                bo->destroy(bo);
        else {
                ttm_mem_global_free(bdev->glob->mem_glob, bo->acc_size);
                kfree(bo);
        }
}

int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
{

        if (interruptible) {
                int ret = 0;

                ret = wait_event_interruptible(bo->event_queue,
                                               atomic_read(&bo->reserved) == 0);
                if (unlikely(ret != 0))
                        return ret;
        } else {
                wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
        }
        return 0;
}
EXPORT_SYMBOL(ttm_bo_wait_unreserved);

static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man;

        BUG_ON(!atomic_read(&bo->reserved));

        if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {

                BUG_ON(!list_empty(&bo->lru));

                man = &bdev->man[bo->mem.mem_type];
                list_add_tail(&bo->lru, &man->lru);
                kref_get(&bo->list_kref);

                if (bo->ttm != NULL) {
                        list_add_tail(&bo->swap, &bo->glob->swap_lru);
                        kref_get(&bo->list_kref);
                }
        }
}

/**
 * Call with the lru_lock held.
 */

static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
        int put_count = 0;

        if (!list_empty(&bo->swap)) {
                list_del_init(&bo->swap);
                ++put_count;
        }
        if (!list_empty(&bo->lru)) {
                list_del_init(&bo->lru);
                ++put_count;
        }

        /*
         * TODO: Add a driver hook to delete from
         * driver-specific LRUs here.
         */

        return put_count;
}

int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
                          bool interruptible,
                          bool no_wait, bool use_sequence, uint32_t sequence)
{
        struct ttm_bo_global *glob = bo->glob;
        int ret;

        while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
                if (use_sequence && bo->seq_valid &&
                        (sequence - bo->val_seq < (1 << 31))) {
                        return -EAGAIN;
                }

                if (no_wait)
                        return -EBUSY;

                spin_unlock(&glob->lru_lock);
                ret = ttm_bo_wait_unreserved(bo, interruptible);
                spin_lock(&glob->lru_lock);

                if (unlikely(ret))
                        return ret;
        }

        if (use_sequence) {
                bo->val_seq = sequence;
                bo->seq_valid = true;
        } else {
                bo->seq_valid = false;
        }

        return 0;
}
EXPORT_SYMBOL(ttm_bo_reserve);

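/*
 * kref release stub for list_kref puts where the caller is known to
 * hold another reference, so the count can never reach zero here.
 */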
static void ttm_bo_ref_bug(struct kref *list_kref)
{
        BUG();
}

int ttm_bo_reserve(struct ttm_buffer_object *bo,
                   bool interruptible,
                   bool no_wait, bool use_sequence, uint32_t sequence)
{
        struct ttm_bo_global *glob = bo->glob;
        int put_count = 0;
        int ret;

        spin_lock(&glob->lru_lock);
        ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
                                    sequence);
        if (likely(ret == 0))
                put_count = ttm_bo_del_from_lru(bo);
        spin_unlock(&glob->lru_lock);

        while (put_count--)
                kref_put(&bo->list_kref, ttm_bo_ref_bug);

        return ret;
}

void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
        struct ttm_bo_global *glob = bo->glob;

        spin_lock(&glob->lru_lock);
        ttm_bo_add_to_lru(bo);
        atomic_set(&bo->reserved, 0);
        wake_up_all(&bo->event_queue);
        spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_unreserve);

/*
 * Call bo->mutex locked.
 */
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_global *glob = bo->glob;
        int ret = 0;
        uint32_t page_flags = 0;

        TTM_ASSERT_LOCKED(&bo->mutex);
        bo->ttm = NULL;

        if (bdev->need_dma32)
                page_flags |= TTM_PAGE_FLAG_DMA32;

        switch (bo->type) {
        case ttm_bo_type_device:
                if (zero_alloc)
                        page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
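                /* fall through: device bos get a ttm just like kernel bos */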
        case ttm_bo_type_kernel:
                bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
                                        page_flags, glob->dummy_read_page);
                if (unlikely(bo->ttm == NULL))
                        ret = -ENOMEM;
                break;
        case ttm_bo_type_user:
                bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
                                        page_flags | TTM_PAGE_FLAG_USER,
                                        glob->dummy_read_page);
                if (unlikely(bo->ttm == NULL)) {
                        ret = -ENOMEM;
                        break;
                }

                ret = ttm_tt_set_user(bo->ttm, current,
                                      bo->buffer_start, bo->num_pages);
                if (unlikely(ret != 0))
                        ttm_tt_destroy(bo->ttm);
                break;
        default:
                printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
                ret = -EINVAL;
                break;
        }

        return ret;
}

static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
                                  struct ttm_mem_reg *mem,
                                  bool evict, bool interruptible, bool no_wait)
{
        struct ttm_bo_device *bdev = bo->bdev;
        bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
        bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
        struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
        struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
        int ret = 0;

        if (old_is_pci || new_is_pci ||
            ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0))
                ttm_bo_unmap_virtual(bo);

        /*
         * Create and bind a ttm if required.
         */

        if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
                ret = ttm_bo_add_ttm(bo, false);
                if (ret)
                        goto out_err;

                ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
                if (ret)
                        goto out_err;

                if (mem->mem_type != TTM_PL_SYSTEM) {
                        ret = ttm_tt_bind(bo->ttm, mem);
                        if (ret)
                                goto out_err;
                }

                if (bo->mem.mem_type == TTM_PL_SYSTEM) {
                        bo->mem = *mem;
                        mem->mm_node = NULL;
                        goto moved;
                }

        }

        if (bdev->driver->move_notify)
                bdev->driver->move_notify(bo, mem);

        if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
            !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
                ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
        else if (bdev->driver->move)
                ret = bdev->driver->move(bo, evict, interruptible,
                                         no_wait, mem);
        else
                ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem);

        if (ret)
                goto out_err;

moved:
        if (bo->evicted) {
                ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
                if (ret)
                        printk(KERN_ERR TTM_PFX "Cannot flush read caches\n");
                bo->evicted = false;
        }

        if (bo->mem.mm_node) {
                spin_lock(&bo->lock);
                bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
                    bdev->man[bo->mem.mem_type].gpu_offset;
                bo->cur_placement = bo->mem.placement;
                spin_unlock(&bo->lock);
        } else
                bo->offset = 0;

        return 0;

out_err:
        new_man = &bdev->man[bo->mem.mem_type];
        if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
                ttm_tt_unbind(bo->ttm);
                ttm_tt_destroy(bo->ttm);
                bo->ttm = NULL;
        }

        return ret;
}

/**
 * If bo idle, remove from delayed- and lru lists, and unref.
 * If not idle, and already on delayed list, do nothing.
 * If not idle, and not on delayed list, put on delayed list,
 *   up the list_kref and schedule a delayed list check.
 */

static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_global *glob = bo->glob;
        struct ttm_bo_driver *driver = bdev->driver;
        int ret;

        spin_lock(&bo->lock);
        (void) ttm_bo_wait(bo, false, false, !remove_all);

        if (!bo->sync_obj) {
                int put_count;

                spin_unlock(&bo->lock);

                spin_lock(&glob->lru_lock);
                put_count = ttm_bo_del_from_lru(bo);

                ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
                BUG_ON(ret);
                if (bo->ttm)
                        ttm_tt_unbind(bo->ttm);

                if (!list_empty(&bo->ddestroy)) {
                        list_del_init(&bo->ddestroy);
                        ++put_count;
                }
                if (bo->mem.mm_node) {
                        bo->mem.mm_node->private = NULL;
                        drm_mm_put_block(bo->mem.mm_node);
                        bo->mem.mm_node = NULL;
                }
                spin_unlock(&glob->lru_lock);

                atomic_set(&bo->reserved, 0);

                while (put_count--)
                        kref_put(&bo->list_kref, ttm_bo_ref_bug);

                return 0;
        }

        spin_lock(&glob->lru_lock);
        if (list_empty(&bo->ddestroy)) {
                void *sync_obj = bo->sync_obj;
                void *sync_obj_arg = bo->sync_obj_arg;

                kref_get(&bo->list_kref);
                list_add_tail(&bo->ddestroy, &bdev->ddestroy);
                spin_unlock(&glob->lru_lock);
                spin_unlock(&bo->lock);

                if (sync_obj)
                        driver->sync_obj_flush(sync_obj, sync_obj_arg);
                schedule_delayed_work(&bdev->wq,
                                      ((HZ / 100) < 1) ? 1 : HZ / 100);
                ret = 0;

        } else {
                spin_unlock(&glob->lru_lock);
                spin_unlock(&bo->lock);
                ret = -EBUSY;
        }

        return ret;
}

/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */

static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
        struct ttm_bo_global *glob = bdev->glob;
        struct ttm_buffer_object *entry, *nentry;
        struct list_head *list, *next;
        int ret;

        spin_lock(&glob->lru_lock);
        list_for_each_safe(list, next, &bdev->ddestroy) {
                entry = list_entry(list, struct ttm_buffer_object, ddestroy);
                nentry = NULL;

                /*
                 * Protect the next list entry from destruction while we
                 * unlock the lru_lock.
                 */

                if (next != &bdev->ddestroy) {
                        nentry = list_entry(next, struct ttm_buffer_object,
                                            ddestroy);
                        kref_get(&nentry->list_kref);
                }
                kref_get(&entry->list_kref);

                spin_unlock(&glob->lru_lock);
                ret = ttm_bo_cleanup_refs(entry, remove_all);
                kref_put(&entry->list_kref, ttm_bo_release_list);

                spin_lock(&glob->lru_lock);
                if (nentry) {
                        bool next_onlist = !list_empty(next);
                        spin_unlock(&glob->lru_lock);
                        kref_put(&nentry->list_kref, ttm_bo_release_list);
                        spin_lock(&glob->lru_lock);
                        /*
                         * Someone might have raced us and removed the
                         * next entry from the list. We don't bother restarting
                         * list traversal.
                         */

                        if (!next_onlist)
                                break;
                }
                if (ret)
                        break;
        }
        ret = !list_empty(&bdev->ddestroy);
        spin_unlock(&glob->lru_lock);

        return ret;
}

static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
        struct ttm_bo_device *bdev =
            container_of(work, struct ttm_bo_device, wq.work);

        if (ttm_bo_delayed_delete(bdev, false)) {
                schedule_delayed_work(&bdev->wq,
                                      ((HZ / 100) < 1) ? 1 : HZ / 100);
        }
}

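/*
 * Final kref release. Called with bdev->vm_lock write-held (see
 * ttm_bo_unref below); the lock is dropped around the potentially
 * sleeping cleanup and retaken before returning to kref_put().
 */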
static void ttm_bo_release(struct kref *kref)
{
        struct ttm_buffer_object *bo =
            container_of(kref, struct ttm_buffer_object, kref);
        struct ttm_bo_device *bdev = bo->bdev;

        if (likely(bo->vm_node != NULL)) {
                rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
                drm_mm_put_block(bo->vm_node);
                bo->vm_node = NULL;
        }
        write_unlock(&bdev->vm_lock);
        ttm_bo_cleanup_refs(bo, false);
        kref_put(&bo->list_kref, ttm_bo_release_list);
        write_lock(&bdev->vm_lock);
}

void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
        struct ttm_buffer_object *bo = *p_bo;
        struct ttm_bo_device *bdev = bo->bdev;

        *p_bo = NULL;
        write_lock(&bdev->vm_lock);
        kref_put(&bo->kref, ttm_bo_release);
        write_unlock(&bdev->vm_lock);
}
EXPORT_SYMBOL(ttm_bo_unref);

static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
                        bool no_wait)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_global *glob = bo->glob;
        struct ttm_mem_reg evict_mem;
        struct ttm_placement placement;
        int ret = 0;

        spin_lock(&bo->lock);
        ret = ttm_bo_wait(bo, false, interruptible, no_wait);
        spin_unlock(&bo->lock);

        if (unlikely(ret != 0)) {
                if (ret != -ERESTARTSYS) {
                        printk(KERN_ERR TTM_PFX
                               "Failed to expire sync object before "
                               "buffer eviction.\n");
                }
                goto out;
        }

        BUG_ON(!atomic_read(&bo->reserved));

        evict_mem = bo->mem;
        evict_mem.mm_node = NULL;

        placement.fpfn = 0;
        placement.lpfn = 0;
        placement.num_placement = 0;
        placement.num_busy_placement = 0;
        bdev->driver->evict_flags(bo, &placement);
        ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
                                no_wait);
        if (ret) {
                if (ret != -ERESTARTSYS) {
                        printk(KERN_ERR TTM_PFX
                               "Failed to find memory space for "
                               "buffer 0x%p eviction.\n", bo);
                        ttm_bo_mem_space_debug(bo, &placement);
                }
                goto out;
        }

        ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
                                     no_wait);
        if (ret) {
                if (ret != -ERESTARTSYS)
                        printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
                spin_lock(&glob->lru_lock);
                if (evict_mem.mm_node) {
                        evict_mem.mm_node->private = NULL;
                        drm_mm_put_block(evict_mem.mm_node);
                        evict_mem.mm_node = NULL;
                }
                spin_unlock(&glob->lru_lock);
                goto out;
        }
        bo->evicted = true;
out:
        return ret;
}

static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
                                uint32_t mem_type,
                                bool interruptible, bool no_wait)
{
        struct ttm_bo_global *glob = bdev->glob;
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
        struct ttm_buffer_object *bo;
        int ret, put_count = 0;

retry:
        spin_lock(&glob->lru_lock);
        if (list_empty(&man->lru)) {
                spin_unlock(&glob->lru_lock);
                return -EBUSY;
        }

        bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
        kref_get(&bo->list_kref);

        ret = ttm_bo_reserve_locked(bo, false, true, false, 0);

        if (unlikely(ret == -EBUSY)) {
                spin_unlock(&glob->lru_lock);
                if (likely(!no_wait))
                        ret = ttm_bo_wait_unreserved(bo, interruptible);

                kref_put(&bo->list_kref, ttm_bo_release_list);

                /**
                 * We *need* to retry after releasing the lru lock.
                 */

                if (unlikely(ret != 0))
                        return ret;
                goto retry;
        }

        put_count = ttm_bo_del_from_lru(bo);
        spin_unlock(&glob->lru_lock);

        BUG_ON(ret != 0);

        while (put_count--)
                kref_put(&bo->list_kref, ttm_bo_ref_bug);

        ret = ttm_bo_evict(bo, interruptible, no_wait);
        ttm_bo_unreserve(bo);

        kref_put(&bo->list_kref, ttm_bo_release_list);
        return ret;
}

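/*
 * Grab a free drm_mm node for @mem from @man. Returns 0 with
 * *node == NULL when no suitable hole exists; the loop retries when
 * another thread races us between the search and the atomic get.
 */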
static int ttm_bo_man_get_node(struct ttm_buffer_object *bo,
                                struct ttm_mem_type_manager *man,
                                struct ttm_placement *placement,
                                struct ttm_mem_reg *mem,
                                struct drm_mm_node **node)
{
        struct ttm_bo_global *glob = bo->glob;
        unsigned long lpfn;
        int ret;

        lpfn = placement->lpfn;
        if (!lpfn)
                lpfn = man->size;
        *node = NULL;
        do {
                ret = drm_mm_pre_get(&man->manager);
                if (unlikely(ret))
                        return ret;

                spin_lock(&glob->lru_lock);
                *node = drm_mm_search_free_in_range(&man->manager,
                                        mem->num_pages, mem->page_alignment,
                                        placement->fpfn, lpfn, 1);
                if (unlikely(*node == NULL)) {
                        spin_unlock(&glob->lru_lock);
                        return 0;
                }
                *node = drm_mm_get_block_atomic_range(*node, mem->num_pages,
                                                        mem->page_alignment,
                                                        placement->fpfn,
                                                        lpfn);
                spin_unlock(&glob->lru_lock);
        } while (*node == NULL);
        return 0;
}

/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
                                        uint32_t mem_type,
                                        struct ttm_placement *placement,
                                        struct ttm_mem_reg *mem,
                                        bool interruptible, bool no_wait)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_global *glob = bdev->glob;
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
        struct drm_mm_node *node;
        int ret;

        do {
                ret = ttm_bo_man_get_node(bo, man, placement, mem, &node);
                if (unlikely(ret != 0))
                        return ret;
                if (node)
                        break;
                spin_lock(&glob->lru_lock);
                if (list_empty(&man->lru)) {
                        spin_unlock(&glob->lru_lock);
                        break;
                }
                spin_unlock(&glob->lru_lock);
                ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
                                                no_wait);
                if (unlikely(ret != 0))
                        return ret;
        } while (1);
        if (node == NULL)
                return -ENOMEM;
        mem->mm_node = node;
        mem->mem_type = mem_type;
        return 0;
}

static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
                                      uint32_t cur_placement,
                                      uint32_t proposed_placement)
{
        uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
        uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

        /**
         * Keep current caching if possible.
         */

        if ((cur_placement & caching) != 0)
                result |= (cur_placement & caching);
        else if ((man->default_caching & caching) != 0)
                result |= man->default_caching;
        else if ((TTM_PL_FLAG_CACHED & caching) != 0)
                result |= TTM_PL_FLAG_CACHED;
        else if ((TTM_PL_FLAG_WC & caching) != 0)
                result |= TTM_PL_FLAG_WC;
        else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
                result |= TTM_PL_FLAG_UNCACHED;

        return result;
}

static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
                                 bool disallow_fixed,
                                 uint32_t mem_type,
                                 uint32_t proposed_placement,
                                 uint32_t *masked_placement)
{
        uint32_t cur_flags = ttm_bo_type_flags(mem_type);

        if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
                return false;

        if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
                return false;

        if ((proposed_placement & man->available_caching) == 0)
                return false;

        cur_flags |= (proposed_placement & man->available_caching);

        *masked_placement = cur_flags;
        return true;
}

/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver.  If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                        struct ttm_placement *placement,
                        struct ttm_mem_reg *mem,
                        bool interruptible, bool no_wait)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man;
        uint32_t mem_type = TTM_PL_SYSTEM;
        uint32_t cur_flags = 0;
        bool type_found = false;
        bool type_ok = false;
        bool has_erestartsys = false;
        struct drm_mm_node *node = NULL;
        int i, ret;

        mem->mm_node = NULL;
        for (i = 0; i < placement->num_placement; ++i) {
                ret = ttm_mem_type_from_flags(placement->placement[i],
                                                &mem_type);
                if (ret)
                        return ret;
                man = &bdev->man[mem_type];

                type_ok = ttm_bo_mt_compatible(man,
                                                bo->type == ttm_bo_type_user,
                                                mem_type,
                                                placement->placement[i],
                                                &cur_flags);

                if (!type_ok)
                        continue;

                cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
                                                  cur_flags);
                /*
                 * Apply the access and other non-mapping-related flag
                 * bits from the memory placement flags to the current
                 * flags.
                 */
                ttm_flag_masked(&cur_flags, placement->placement[i],
                                ~TTM_PL_MASK_MEMTYPE);

                if (mem_type == TTM_PL_SYSTEM)
                        break;

                if (man->has_type && man->use_type) {
                        type_found = true;
                        ret = ttm_bo_man_get_node(bo, man, placement, mem,
                                                        &node);
                        if (unlikely(ret))
                                return ret;
                }
                if (node)
                        break;
        }

        if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) {
                mem->mm_node = node;
                mem->mem_type = mem_type;
                mem->placement = cur_flags;
                if (node)
                        node->private = bo;
                return 0;
        }

        if (!type_found)
                return -EINVAL;

        for (i = 0; i < placement->num_busy_placement; ++i) {
                ret = ttm_mem_type_from_flags(placement->busy_placement[i],
                                                &mem_type);
                if (ret)
                        return ret;
                man = &bdev->man[mem_type];
                if (!man->has_type)
                        continue;
                if (!ttm_bo_mt_compatible(man,
                                                bo->type == ttm_bo_type_user,
                                                mem_type,
                                                placement->busy_placement[i],
                                                &cur_flags))
                        continue;

                cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
                                                  cur_flags);
                /*
                 * Apply the access and other non-mapping-related flag
                 * bits from the memory placement flags to the current
                 * flags.
                 */
                ttm_flag_masked(&cur_flags, placement->busy_placement[i],
                                ~TTM_PL_MASK_MEMTYPE);

                ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
                                                interruptible, no_wait);
                if (ret == 0 && mem->mm_node) {
                        mem->placement = cur_flags;
                        mem->mm_node->private = bo;
                        return 0;
                }
                if (ret == -ERESTARTSYS)
                        has_erestartsys = true;
        }
        ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
        return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);

int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
{
        if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
                return -EBUSY;

        return wait_event_interruptible(bo->event_queue,
                                        atomic_read(&bo->cpu_writers) == 0);
}
EXPORT_SYMBOL(ttm_bo_wait_cpu);

int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
                        struct ttm_placement *placement,
                        bool interruptible, bool no_wait)
{
        struct ttm_bo_global *glob = bo->glob;
        int ret = 0;
        struct ttm_mem_reg mem;

        BUG_ON(!atomic_read(&bo->reserved));

        /*
         * FIXME: It's possible to pipeline buffer moves.
         * Have the driver move function wait for idle when necessary,
         * instead of doing it here.
         */
        spin_lock(&bo->lock);
        ret = ttm_bo_wait(bo, false, interruptible, no_wait);
        spin_unlock(&bo->lock);
        if (ret)
                return ret;
        mem.num_pages = bo->num_pages;
        mem.size = mem.num_pages << PAGE_SHIFT;
        mem.page_alignment = bo->mem.page_alignment;
        /*
         * Determine where to move the buffer.
         */
        ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait);
        if (ret)
                goto out_unlock;
        ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
out_unlock:
        if (ret && mem.mm_node) {
                spin_lock(&glob->lru_lock);
                mem.mm_node->private = NULL;
                drm_mm_put_block(mem.mm_node);
                spin_unlock(&glob->lru_lock);
        }
        return ret;
}

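/*
 * Returns the index of the first placement entry whose caching and
 * memory-type bits both match @mem, or -1 if none is compatible.
 */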
static int ttm_bo_mem_compat(struct ttm_placement *placement,
                             struct ttm_mem_reg *mem)
{
        int i;

        for (i = 0; i < placement->num_placement; i++) {
                if ((placement->placement[i] & mem->placement &
                        TTM_PL_MASK_CACHING) &&
                        (placement->placement[i] & mem->placement &
                        TTM_PL_MASK_MEM))
                        return i;
        }
        return -1;
}

int ttm_bo_validate(struct ttm_buffer_object *bo,
                        struct ttm_placement *placement,
                        bool interruptible, bool no_wait)
{
        int ret;

        BUG_ON(!atomic_read(&bo->reserved));
        /* Check that range is valid */
        if (placement->lpfn || placement->fpfn)
                if (placement->fpfn > placement->lpfn ||
                        (placement->lpfn - placement->fpfn) < bo->num_pages)
                        return -EINVAL;
        /*
         * Check whether we need to move buffer.
         */
        ret = ttm_bo_mem_compat(placement, &bo->mem);
        if (ret < 0) {
                ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait);
                if (ret)
                        return ret;
        } else {
                /*
                 * Apply the access and other non-mapping-related flag
                 * bits from the compatible memory placement flags to
                 * the active flags.
                 */
                ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
                                ~TTM_PL_MASK_MEMTYPE);
        }
        /*
         * We might need to add a TTM.
         */
        if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
                ret = ttm_bo_add_ttm(bo, true);
                if (ret)
                        return ret;
        }
        return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);

int ttm_bo_check_placement(struct ttm_buffer_object *bo,
                                struct ttm_placement *placement)
{
        int i;

        if (placement->fpfn || placement->lpfn) {
                if (bo->mem.num_pages > (placement->lpfn - placement->fpfn)) {
                        printk(KERN_ERR TTM_PFX "Page number range too small. "
                                "Need %lu pages, range is [%u, %u]\n",
                                bo->mem.num_pages, placement->fpfn,
                                placement->lpfn);
                        return -EINVAL;
                }
        }
        for (i = 0; i < placement->num_placement; i++) {
                if (!capable(CAP_SYS_ADMIN)) {
                        if (placement->placement[i] & TTM_PL_FLAG_NO_EVICT) {
                                printk(KERN_ERR TTM_PFX "Need to be root to "
                                        "modify NO_EVICT status.\n");
                                return -EINVAL;
                        }
                }
        }
        for (i = 0; i < placement->num_busy_placement; i++) {
                if (!capable(CAP_SYS_ADMIN)) {
                        if (placement->busy_placement[i] & TTM_PL_FLAG_NO_EVICT) {
                                printk(KERN_ERR TTM_PFX "Need to be root to "
                                        "modify NO_EVICT status.\n");
                                return -EINVAL;
                        }
                }
        }
        return 0;
}

int ttm_bo_init(struct ttm_bo_device *bdev,
                struct ttm_buffer_object *bo,
                unsigned long size,
                enum ttm_bo_type type,
                struct ttm_placement *placement,
                uint32_t page_alignment,
                unsigned long buffer_start,
                bool interruptible,
                struct file *persistant_swap_storage,
                size_t acc_size,
                void (*destroy) (struct ttm_buffer_object *))
{
        int ret = 0;
        unsigned long num_pages;

        size += buffer_start & ~PAGE_MASK;
        num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (num_pages == 0) {
                printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
                return -EINVAL;
        }
        bo->destroy = destroy;

        spin_lock_init(&bo->lock);
        kref_init(&bo->kref);
        kref_init(&bo->list_kref);
        atomic_set(&bo->cpu_writers, 0);
        atomic_set(&bo->reserved, 1);
        init_waitqueue_head(&bo->event_queue);
        INIT_LIST_HEAD(&bo->lru);
        INIT_LIST_HEAD(&bo->ddestroy);
        INIT_LIST_HEAD(&bo->swap);
        bo->bdev = bdev;
        bo->glob = bdev->glob;
        bo->type = type;
        bo->num_pages = num_pages;
        bo->mem.size = num_pages << PAGE_SHIFT;
        bo->mem.mem_type = TTM_PL_SYSTEM;
        bo->mem.num_pages = bo->num_pages;
        bo->mem.mm_node = NULL;
        bo->mem.page_alignment = page_alignment;
        bo->buffer_start = buffer_start & PAGE_MASK;
        bo->priv_flags = 0;
        bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
        bo->seq_valid = false;
        bo->persistant_swap_storage = persistant_swap_storage;
        bo->acc_size = acc_size;
        atomic_inc(&bo->glob->bo_count);

        ret = ttm_bo_check_placement(bo, placement);
        if (unlikely(ret != 0))
                goto out_err;

        /*
         * For ttm_bo_type_device buffers, allocate
         * address space from the device.
         */
        if (bo->type == ttm_bo_type_device) {
                ret = ttm_bo_setup_vm(bo);
                if (ret)
                        goto out_err;
        }

        ret = ttm_bo_validate(bo, placement, interruptible, false);
        if (ret)
                goto out_err;

        ttm_bo_unreserve(bo);
        return 0;

out_err:
        ttm_bo_unreserve(bo);
        ttm_bo_unref(&bo);

        return ret;
}
EXPORT_SYMBOL(ttm_bo_init);

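/*
 * Accounting size charged for a bo: the global per-bo size plus twice
 * the page-aligned size of its page-pointer array.
 */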
1187 static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
1188                                  unsigned long num_pages)
1189 {
1190         size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
1191             PAGE_MASK;
1192
1193         return glob->ttm_bo_size + 2 * page_array_size;
1194 }
1195
1196 int ttm_bo_create(struct ttm_bo_device *bdev,
1197                         unsigned long size,
1198                         enum ttm_bo_type type,
1199                         struct ttm_placement *placement,
1200                         uint32_t page_alignment,
1201                         unsigned long buffer_start,
1202                         bool interruptible,
1203                         struct file *persistant_swap_storage,
1204                         struct ttm_buffer_object **p_bo)
1205 {
1206         struct ttm_buffer_object *bo;
1207         struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
1208         int ret;
1209
1210         size_t acc_size =
1211             ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
1212         ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
1213         if (unlikely(ret != 0))
1214                 return ret;
1215
1216         bo = kzalloc(sizeof(*bo), GFP_KERNEL);
1217
1218         if (unlikely(bo == NULL)) {
1219                 ttm_mem_global_free(mem_glob, acc_size);
1220                 return -ENOMEM;
1221         }
1222
1223         ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
1224                                 buffer_start, interruptible,
1225                                 persistant_swap_storage, acc_size, NULL);
1226         if (likely(ret == 0))
1227                 *p_bo = bo;
1228
1229         return ret;
1230 }
1231
1232 static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
1233                                         unsigned mem_type, bool allow_errors)
1234 {
1235         struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1236         struct ttm_bo_global *glob = bdev->glob;
1237         int ret;
1238
1239         /*
1240          * Can't use standard list traversal since we're unlocking.
1241          */
1242
1243         spin_lock(&glob->lru_lock);
1244         while (!list_empty(&man->lru)) {
1245                 spin_unlock(&glob->lru_lock);
1246                 ret = ttm_mem_evict_first(bdev, mem_type, false, false);
1247                 if (ret) {
1248                         if (allow_errors) {
1249                                 return ret;
1250                         } else {
1251                                 printk(KERN_ERR TTM_PFX
1252                                         "Cleanup eviction failed\n");
1253                         }
1254                 }
1255                 spin_lock(&glob->lru_lock);
1256         }
1257         spin_unlock(&glob->lru_lock);
1258         return 0;
1259 }
1260
1261 int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1262 {
1263         struct ttm_bo_global *glob = bdev->glob;
1264         struct ttm_mem_type_manager *man;
1265         int ret = -EINVAL;
1266
1267         if (mem_type >= TTM_NUM_MEM_TYPES) {
1268                 printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type);
1269                 return ret;
1270         }
1271         man = &bdev->man[mem_type];
1272
1273         if (!man->has_type) {
1274                 printk(KERN_ERR TTM_PFX "Trying to take down uninitialized "
1275                        "memory manager type %u\n", mem_type);
1276                 return ret;
1277         }
1278
1279         man->use_type = false;
1280         man->has_type = false;
1281
1282         ret = 0;
1283         if (mem_type > 0) {
1284                 ttm_bo_force_list_clean(bdev, mem_type, false);
1285
1286                 spin_lock(&glob->lru_lock);
1287                 if (drm_mm_clean(&man->manager))
1288                         drm_mm_takedown(&man->manager);
1289                 else
1290                         ret = -EBUSY;
1291
1292                 spin_unlock(&glob->lru_lock);
1293         }
1294
1295         return ret;
1296 }
1297 EXPORT_SYMBOL(ttm_bo_clean_mm);
1298
1299 int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1300 {
1301         struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1302
1303         if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
1304                 printk(KERN_ERR TTM_PFX
1305                        "Illegal memory manager memory type %u.\n",
1306                        mem_type);
1307                 return -EINVAL;
1308         }
1309
1310         if (!man->has_type) {
1311                 printk(KERN_ERR TTM_PFX
1312                        "Memory type %u has not been initialized.\n",
1313                        mem_type);
1314                 return 0;
1315         }
1316
1317         return ttm_bo_force_list_clean(bdev, mem_type, true);
1318 }
1319 EXPORT_SYMBOL(ttm_bo_evict_mm);
1320
1321 int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1322                         unsigned long p_size)
1323 {
1324         int ret = -EINVAL;
1325         struct ttm_mem_type_manager *man;
1326
1327         if (type >= TTM_NUM_MEM_TYPES) {
1328                 printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type);
1329                 return ret;
1330         }
1331
1332         man = &bdev->man[type];
1333         if (man->has_type) {
1334                 printk(KERN_ERR TTM_PFX
1335                        "Memory manager already initialized for type %d\n",
1336                        type);
1337                 return ret;
1338         }
1339
1340         ret = bdev->driver->init_mem_type(bdev, type, man);
1341         if (ret)
1342                 return ret;
1343
1344         ret = 0;
1345         if (type != TTM_PL_SYSTEM) {
1346                 if (!p_size) {
1347                         printk(KERN_ERR TTM_PFX
1348                                "Zero size memory manager type %d\n",
1349                                type);
1350                         return ret;
1351                 }
1352                 ret = drm_mm_init(&man->manager, 0, p_size);
1353                 if (ret)
1354                         return ret;
1355         }
1356         man->has_type = true;
1357         man->use_type = true;
1358         man->size = p_size;
1359
1360         INIT_LIST_HEAD(&man->lru);
1361
1362         return 0;
1363 }
1364 EXPORT_SYMBOL(ttm_bo_init_mm);
1365
1366 static void ttm_bo_global_kobj_release(struct kobject *kobj)
1367 {
1368         struct ttm_bo_global *glob =
1369                 container_of(kobj, struct ttm_bo_global, kobj);
1370
1371         ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
1372         __free_page(glob->dummy_read_page);
1373         kfree(glob);
1374 }
1375
1376 void ttm_bo_global_release(struct ttm_global_reference *ref)
1377 {
1378         struct ttm_bo_global *glob = ref->object;
1379
1380         kobject_del(&glob->kobj);
1381         kobject_put(&glob->kobj);
1382 }
1383 EXPORT_SYMBOL(ttm_bo_global_release);
1384
1385 int ttm_bo_global_init(struct ttm_global_reference *ref)
1386 {
1387         struct ttm_bo_global_ref *bo_ref =
1388                 container_of(ref, struct ttm_bo_global_ref, ref);
1389         struct ttm_bo_global *glob = ref->object;
1390         int ret;
1391
1392         mutex_init(&glob->device_list_mutex);
1393         spin_lock_init(&glob->lru_lock);
1394         glob->mem_glob = bo_ref->mem_glob;
1395         glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
1396
1397         if (unlikely(glob->dummy_read_page == NULL)) {
1398                 ret = -ENOMEM;
1399                 goto out_no_drp;
1400         }
1401
1402         INIT_LIST_HEAD(&glob->swap_lru);
1403         INIT_LIST_HEAD(&glob->device_list);
1404
1405         ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
1406         ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
1407         if (unlikely(ret != 0)) {
1408                 printk(KERN_ERR TTM_PFX
1409                        "Could not register buffer object swapout.\n");
1410                 goto out_no_shrink;
1411         }
1412
1413         glob->ttm_bo_extra_size =
1414                 ttm_round_pot(sizeof(struct ttm_tt)) +
1415                 ttm_round_pot(sizeof(struct ttm_backend));
1416
1417         glob->ttm_bo_size = glob->ttm_bo_extra_size +
1418                 ttm_round_pot(sizeof(struct ttm_buffer_object));
1419
1420         atomic_set(&glob->bo_count, 0);
1421
1422         kobject_init(&glob->kobj, &ttm_bo_glob_kobj_type);
1423         ret = kobject_add(&glob->kobj, ttm_get_kobj(), "buffer_objects");
1424         if (unlikely(ret != 0))
1425                 kobject_put(&glob->kobj);
1426         return ret;
1427 out_no_shrink:
1428         __free_page(glob->dummy_read_page);
1429 out_no_drp:
1430         kfree(glob);
1431         return ret;
1432 }
1433 EXPORT_SYMBOL(ttm_bo_global_init);
1434
1435
1436 int ttm_bo_device_release(struct ttm_bo_device *bdev)
1437 {
1438         int ret = 0;
1439         unsigned i = TTM_NUM_MEM_TYPES;
1440         struct ttm_mem_type_manager *man;
1441         struct ttm_bo_global *glob = bdev->glob;
1442
1443         while (i--) {
1444                 man = &bdev->man[i];
1445                 if (man->has_type) {
1446                         man->use_type = false;
1447                         if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
1448                                 ret = -EBUSY;
1449                                 printk(KERN_ERR TTM_PFX
1450                                        "DRM memory manager type %d "
1451                                        "is not clean.\n", i);
1452                         }
1453                         man->has_type = false;
1454                 }
1455         }
1456
1457         mutex_lock(&glob->device_list_mutex);
1458         list_del(&bdev->device_list);
1459         mutex_unlock(&glob->device_list_mutex);
1460
1461         if (!cancel_delayed_work(&bdev->wq))
1462                 flush_scheduled_work();
1463
1464         while (ttm_bo_delayed_delete(bdev, true))
1465                 ;
1466
1467         spin_lock(&glob->lru_lock);
1468         if (list_empty(&bdev->ddestroy))
1469                 TTM_DEBUG("Delayed destroy list was clean\n");
1470
1471         if (list_empty(&bdev->man[0].lru))
1472                 TTM_DEBUG("Swap list was clean\n");
1473         spin_unlock(&glob->lru_lock);
1474
1475         BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
1476         write_lock(&bdev->vm_lock);
1477         drm_mm_takedown(&bdev->addr_space_mm);
1478         write_unlock(&bdev->vm_lock);
1479
1480         return ret;
1481 }
1482 EXPORT_SYMBOL(ttm_bo_device_release);
1483
1484 int ttm_bo_device_init(struct ttm_bo_device *bdev,
1485                        struct ttm_bo_global *glob,
1486                        struct ttm_bo_driver *driver,
1487                        uint64_t file_page_offset,
1488                        bool need_dma32)
1489 {
1490         int ret = -EINVAL;
1491
1492         rwlock_init(&bdev->vm_lock);
1493         bdev->driver = driver;
1494
1495         memset(bdev->man, 0, sizeof(bdev->man));
1496
1497         /*
1498          * Initialize the system memory buffer type.
1499          * Other memory types are initialized by the driver, e.g. via its ioctls.
1500          */
1501         ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
1502         if (unlikely(ret != 0))
1503                 goto out_no_sys;
1504
1505         bdev->addr_space_rb = RB_ROOT;
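        /*
         * The address space manager below works in units of pages:
         * file_page_offset and the 0x10000000 size are page counts,
         * so the mmapable window spans 0x10000000 pages (1 TiB with
         * 4 KiB pages) starting at file_page_offset.
         */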
1506         ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
1507         if (unlikely(ret != 0))
1508                 goto out_no_addr_mm;
1509
1510         INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
1511         bdev->nice_mode = true;
1512         INIT_LIST_HEAD(&bdev->ddestroy);
1513         bdev->dev_mapping = NULL;
1514         bdev->glob = glob;
1515         bdev->need_dma32 = need_dma32;
1516
1517         mutex_lock(&glob->device_list_mutex);
1518         list_add_tail(&bdev->device_list, &glob->device_list);
1519         mutex_unlock(&glob->device_list_mutex);
1520
1521         return 0;
1522 out_no_addr_mm:
1523         ttm_bo_clean_mm(bdev, TTM_PL_SYSTEM);
1524 out_no_sys:
1525         return ret;
1526 }
1527 EXPORT_SYMBOL(ttm_bo_device_init);
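
/*
 * Editorial sketch: a driver typically calls this once per device,
 * after taking its reference on the global state. The names below,
 * including DRM_FILE_PAGE_OFFSET, are driver-defined and illustrative:
 *
 *	ret = ttm_bo_device_init(&mdev->bdev,
 *				 mdev->bo_global_ref.ref.object,
 *				 &mydrv_bo_driver,
 *				 DRM_FILE_PAGE_OFFSET, mdev->need_dma32);
 *
 * after which the driver registers its additional memory types
 * (TTM_PL_VRAM, TTM_PL_TT, ...) with ttm_bo_init_mm().
 */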
1528
1529 /*
1530  * buffer object vm functions.
1531  */
1532
1533 bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1534 {
1535         struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
1536
1537         if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
1538                 if (mem->mem_type == TTM_PL_SYSTEM)
1539                         return false;
1540
1541                 if (man->flags & TTM_MEMTYPE_FLAG_CMA)
1542                         return false;
1543
1544                 if (mem->placement & TTM_PL_FLAG_CACHED)
1545                         return false;
1546         }
1547         return true;
1548 }
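
/*
 * Editorial note on the logic above: memory types marked
 * TTM_MEMTYPE_FLAG_FIXED (typically on-card VRAM apertures) always
 * count as PCI memory. Otherwise, plain system memory, types whose
 * aperture cannot be mapped (TTM_MEMTYPE_FLAG_CMA) and cached
 * placements are reached through ordinary CPU page tables and are
 * therefore not PCI memory; the remaining case (an uncached AGP/TT
 * aperture, for instance) is.
 */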
1549
1550 int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
1551                       struct ttm_mem_reg *mem,
1552                       unsigned long *bus_base,
1553                       unsigned long *bus_offset, unsigned long *bus_size)
1554 {
1555         struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
1556
1557         *bus_size = 0;
1558         if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
1559                 return -EINVAL;
1560
1561         if (ttm_mem_reg_is_pci(bdev, mem)) {
1562                 *bus_offset = mem->mm_node->start << PAGE_SHIFT;
1563                 *bus_size = mem->num_pages << PAGE_SHIFT;
1564                 *bus_base = man->io_offset;
1565         }
1566
1567         return 0;
1568 }
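
/*
 * Editorial sketch: callers use the returned triple to map the buffer
 * through the PCI aperture; *bus_size == 0 on success means the buffer
 * is not PCI memory. Roughly:
 *
 *	ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset,
 *				&bus_size);
 *	if (!ret && bus_size)
 *		virtual = ioremap(bus_base + bus_offset, bus_size);
 */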
1569
1570 void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
1571 {
1572         struct ttm_bo_device *bdev = bo->bdev;
1573         loff_t offset = (loff_t) bo->addr_space_offset;
1574         loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
1575
1576         if (!bdev->dev_mapping)
1577                 return;
1578
1579         unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
1580 }
1581 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
1582
1583 static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
1584 {
1585         struct ttm_bo_device *bdev = bo->bdev;
1586         struct rb_node **cur = &bdev->addr_space_rb.rb_node;
1587         struct rb_node *parent = NULL;
1588         struct ttm_buffer_object *cur_bo;
1589         unsigned long offset = bo->vm_node->start;
1590         unsigned long cur_offset;
1591
1592         while (*cur) {
1593                 parent = *cur;
1594                 cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
1595                 cur_offset = cur_bo->vm_node->start;
1596                 if (offset < cur_offset)
1597                         cur = &parent->rb_left;
1598                 else if (offset > cur_offset)
1599                         cur = &parent->rb_right;
1600                 else
1601                         BUG();
1602         }
1603
1604         rb_link_node(&bo->vm_rb, parent, cur);
1605         rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
1606 }
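
/*
 * Editorial note: the tree above is keyed on vm_node->start, the page
 * offset of the object within the device address space. The mmap fault
 * path walks it to translate a file offset back into a buffer object,
 * which is why a duplicate offset is a BUG().
 */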
1607
1608 /**
1609  * ttm_bo_setup_vm - allocate device address space for a buffer object
1610  *
1611  * @bo: the buffer to allocate address space for
1612  *
1613  * Allocate address space in the drm device so that applications
1614  * can mmap the buffer and access the contents. This only
1615  * applies to ttm_bo_type_device objects as others are not
1616  * placed in the drm device address space.
1617  */
1618
1619 static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
1620 {
1621         struct ttm_bo_device *bdev = bo->bdev;
1622         int ret;
1623
1624 retry_pre_get:
1625         ret = drm_mm_pre_get(&bdev->addr_space_mm);
1626         if (unlikely(ret != 0))
1627                 return ret;
1628
1629         write_lock(&bdev->vm_lock);
1630         bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
1631                                          bo->mem.num_pages, 0, 0);
1632
1633         if (unlikely(bo->vm_node == NULL)) {
1634                 ret = -ENOMEM;
1635                 goto out_unlock;
1636         }
1637
1638         bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
1639                                               bo->mem.num_pages, 0);
1640
1641         if (unlikely(bo->vm_node == NULL)) {
1642                 write_unlock(&bdev->vm_lock);
1643                 goto retry_pre_get;
1644         }
1645
1646         ttm_bo_vm_insert_rb(bo);
1647         write_unlock(&bdev->vm_lock);
1648         bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
1649
1650         return 0;
1651 out_unlock:
1652         write_unlock(&bdev->vm_lock);
1653         return ret;
1654 }
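
/*
 * Editorial sketch: once the address space is set up, user space
 * reaches the buffer by handing addr_space_offset (in bytes, usually
 * returned to it by a driver ioctl) to mmap() on the drm device file:
 *
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   drm_fd, bo->addr_space_offset);
 */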
1655
1656 int ttm_bo_wait(struct ttm_buffer_object *bo,
1657                 bool lazy, bool interruptible, bool no_wait)
1658 {
1659         struct ttm_bo_driver *driver = bo->bdev->driver;
1660         void *sync_obj;
1661         void *sync_obj_arg;
1662         int ret = 0;
1663
1664         if (likely(bo->sync_obj == NULL))
1665                 return 0;
1666
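        /*
         * bo->lock is dropped around every driver callback below, so
         * bo->sync_obj may change under us; each iteration re-reads it
         * and only clears it if it still matches the object we waited
         * on.
         */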
1667         while (bo->sync_obj) {
1668
1669                 if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
1670                         void *tmp_obj = bo->sync_obj;
1671                         bo->sync_obj = NULL;
1672                         clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
1673                         spin_unlock(&bo->lock);
1674                         driver->sync_obj_unref(&tmp_obj);
1675                         spin_lock(&bo->lock);
1676                         continue;
1677                 }
1678
1679                 if (no_wait)
1680                         return -EBUSY;
1681
1682                 sync_obj = driver->sync_obj_ref(bo->sync_obj);
1683                 sync_obj_arg = bo->sync_obj_arg;
1684                 spin_unlock(&bo->lock);
1685                 ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
1686                                             lazy, interruptible);
1687                 if (unlikely(ret != 0)) {
1688                         driver->sync_obj_unref(&sync_obj);
1689                         spin_lock(&bo->lock);
1690                         return ret;
1691                 }
1692                 spin_lock(&bo->lock);
1693                 if (likely(bo->sync_obj == sync_obj &&
1694                            bo->sync_obj_arg == sync_obj_arg)) {
1695                         void *tmp_obj = bo->sync_obj;
1696                         bo->sync_obj = NULL;
1697                         clear_bit(TTM_BO_PRIV_FLAG_MOVING,
1698                                   &bo->priv_flags);
1699                         spin_unlock(&bo->lock);
1700                         driver->sync_obj_unref(&sync_obj);
1701                         driver->sync_obj_unref(&tmp_obj);
1702                         spin_lock(&bo->lock);
1703                 } else {
1704                         spin_unlock(&bo->lock);
1705                         driver->sync_obj_unref(&sync_obj);
1706                         spin_lock(&bo->lock);
1707                 }
1708         }
1709         return 0;
1710 }
1711 EXPORT_SYMBOL(ttm_bo_wait);
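
/*
 * Editorial sketch: callers hold bo->lock across the call, as in
 * ttm_bo_synccpu_write_grab() below:
 *
 *	spin_lock(&bo->lock);
 *	ret = ttm_bo_wait(bo, false, true, no_wait);
 *	spin_unlock(&bo->lock);
 */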
1712
1713 void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo)
1714 {
1715         atomic_set(&bo->reserved, 0);
1716         wake_up_all(&bo->event_queue);
1717 }
1718
1719 int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
1720                              bool no_wait)
1721 {
1722         int ret;
1723
1724         while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
1725                 if (no_wait)
1726                         return -EBUSY;
1727                 else if (interruptible) {
1728                         ret = wait_event_interruptible
1729                             (bo->event_queue, atomic_read(&bo->reserved) == 0);
1730                         if (unlikely(ret != 0))
1731                                 return ret;
1732                 } else {
1733                         wait_event(bo->event_queue,
1734                                    atomic_read(&bo->reserved) == 0);
1735                 }
1736         }
1737         return 0;
1738 }
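
/*
 * Editorial note: the atomic_cmpxchg() above either takes the
 * reservation (0 -> 1) in a single step or finds it already held; in
 * the latter case the caller sleeps on event_queue until
 * ttm_bo_unblock_reservation() or ttm_bo_unreserve() wakes it. Unlike
 * ttm_bo_reserve(), this variant leaves the LRU lists alone.
 */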
1739
1740 int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
1741 {
1742         int ret = 0;
1743
1744         /*
1745          * Using ttm_bo_reserve instead of ttm_bo_block_reservation
1746          * makes sure the lru lists are updated.
1747          */
1748
1749         ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
1750         if (unlikely(ret != 0))
1751                 return ret;
1752         spin_lock(&bo->lock);
1753         ret = ttm_bo_wait(bo, false, true, no_wait);
1754         spin_unlock(&bo->lock);
1755         if (likely(ret == 0))
1756                 atomic_inc(&bo->cpu_writers);
1757         ttm_bo_unreserve(bo);
1758         return ret;
1759 }
1760 EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
1761
1762 void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
1763 {
1764         if (atomic_dec_and_test(&bo->cpu_writers))
1765                 wake_up_all(&bo->event_queue);
1766 }
1767 EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
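
/*
 * Editorial sketch: drivers bracket CPU writes with the grab / release
 * pair:
 *
 *	ret = ttm_bo_synccpu_write_grab(bo, no_wait);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	... CPU writes to the buffer ...
 *	ttm_bo_synccpu_write_release(bo);
 */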
1768
1769 /**
1770  * A buffer object shrink method that tries to swap out the first
1771  * buffer object on the bo_global::swap_lru list.
1772  */
1773
1774 static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
1775 {
1776         struct ttm_bo_global *glob =
1777             container_of(shrink, struct ttm_bo_global, shrink);
1778         struct ttm_buffer_object *bo;
1779         int ret = -EBUSY;
1780         int put_count;
1781         uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
1782
1783         spin_lock(&glob->lru_lock);
1784         while (ret == -EBUSY) {
1785                 if (unlikely(list_empty(&glob->swap_lru))) {
1786                         spin_unlock(&glob->lru_lock);
1787                         return -EBUSY;
1788                 }
1789
1790                 bo = list_first_entry(&glob->swap_lru,
1791                                       struct ttm_buffer_object, swap);
1792                 kref_get(&bo->list_kref);
1793
1794                 /*
1795                  * Reserve buffer. Since we unlock while sleeping, we need
1796                  * to re-check that nobody removed us from the swap-list while
1797                  * we slept.
1798                  */
1799
1800                 ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
1801                 if (unlikely(ret == -EBUSY)) {
1802                         spin_unlock(&glob->lru_lock);
1803                         ttm_bo_wait_unreserved(bo, false);
1804                         kref_put(&bo->list_kref, ttm_bo_release_list);
1805                         spin_lock(&glob->lru_lock);
1806                 }
1807         }
1808
1809         BUG_ON(ret != 0);
1810         put_count = ttm_bo_del_from_lru(bo);
1811         spin_unlock(&glob->lru_lock);
1812
1813         while (put_count--)
1814                 kref_put(&bo->list_kref, ttm_bo_ref_bug);
1815
1816         /*
1817          * Wait for GPU, then move to system cached.
1818          */
1819
1820         spin_lock(&bo->lock);
1821         ret = ttm_bo_wait(bo, false, false, false);
1822         spin_unlock(&bo->lock);
1823
1824         if (unlikely(ret != 0))
1825                 goto out;
1826
1827         if ((bo->mem.placement & swap_placement) != swap_placement) {
1828                 struct ttm_mem_reg evict_mem;
1829
1830                 evict_mem = bo->mem;
1831                 evict_mem.mm_node = NULL;
1832                 evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
1833                 evict_mem.mem_type = TTM_PL_SYSTEM;
1834
1835                 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
1836                                              false, false);
1837                 if (unlikely(ret != 0))
1838                         goto out;
1839         }
1840
1841         ttm_bo_unmap_virtual(bo);
1842
1843         /*
1844          * Swap out. Buffer will be swapped in again as soon as
1845          * anyone tries to access a ttm page.
1846          */
1847
1848         if (bo->bdev->driver->swap_notify)
1849                 bo->bdev->driver->swap_notify(bo);
1850
1851         ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
1852 out:
1853
1854         /*
1856          * Unreserve without putting on LRU to avoid swapping out an
1857          * already swapped buffer.
1858          */
1859
1860         atomic_set(&bo->reserved, 0);
1861         wake_up_all(&bo->event_queue);
1862         kref_put(&bo->list_kref, ttm_bo_release_list);
1863         return ret;
1864 }
1865
1866 void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
1867 {
1868         while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
1869                 ;
1870 }
1871 EXPORT_SYMBOL(ttm_bo_swapout_all);
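
/*
 * Editorial note: this simply drives ttm_bo_swapout() until it stops
 * succeeding, i.e. until the swap LRU no longer yields a swappable
 * buffer. A driver might use it when the device must give up its
 * buffers wholesale (a suspend-style path, for instance); when to call
 * it is driver policy rather than something this file dictates.
 */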