drm/ttm: Make sure system buffer objects have offset == 0.
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 6538d42369891fcccb1fed21b436ecd7f1c6773c..da3702135ada857a21985e07128fb41e85a906c1 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
 /*
  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
  */
+/* Notes:
+ *
+ * We store a bo pointer in the drm_mm_node struct so we know which bo
+ * owns a specific node. There is no protection on the pointer, so to
+ * make sure things don't go berserk you have to access this pointer
+ * while holding the global lru lock, and make sure that any time you
+ * free a node you reset the pointer to NULL.
+ */
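
For illustration, a minimal sketch of that rule in practice, matching the pattern this patch uses at each free site (the helper itself is hypothetical, not part of the patch):

        /* Hypothetical helper: free a bo's mm node under the global lru
         * lock, clearing the back pointer first as the note requires. */
        static void ttm_bo_free_mm_node_sketch(struct ttm_buffer_object *bo)
        {
                struct ttm_bo_global *glob = bo->glob;

                spin_lock(&glob->lru_lock);
                if (bo->mem.mm_node) {
                        bo->mem.mm_node->private = NULL;
                        drm_mm_put_block(bo->mem.mm_node);
                        bo->mem.mm_node = NULL;
                }
                spin_unlock(&glob->lru_lock);
        }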
 
 #include "ttm/ttm_module.h"
 #include "ttm/ttm_bo_driver.h"
 
 static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
 static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
+static void ttm_bo_global_kobj_release(struct kobject *kobj);
+
+static struct attribute ttm_bo_count = {
+       .name = "bo_count",
+       .mode = S_IRUGO
+};
+
+static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
+{
+       int i;
+
+       for (i = 0; i <= TTM_PL_PRIV5; i++)
+               if (flags & (1 << i)) {
+                       *mem_type = i;
+                       return 0;
+               }
+       return -EINVAL;
+}
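
This simply inverts the TTM_PL_FLAG_* bit encoding. A usage sketch, assuming TTM_PL_FLAG_VRAM == (1 << TTM_PL_VRAM) as defined in ttm_placement.h:

        uint32_t mem_type;

        /* Sketch: recovers the index TTM_PL_VRAM from the flag word. */
        if (ttm_mem_type_from_flags(TTM_PL_FLAG_VRAM, &mem_type) == 0)
                printk(KERN_INFO TTM_PFX "mem_type = %u\n", mem_type);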
+
+static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
+{
+       struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+
+       printk(KERN_ERR TTM_PFX "    has_type: %d\n", man->has_type);
+       printk(KERN_ERR TTM_PFX "    use_type: %d\n", man->use_type);
+       printk(KERN_ERR TTM_PFX "    flags: 0x%08X\n", man->flags);
+       printk(KERN_ERR TTM_PFX "    gpu_offset: 0x%08lX\n", man->gpu_offset);
+       printk(KERN_ERR TTM_PFX "    io_offset: 0x%08lX\n", man->io_offset);
+       printk(KERN_ERR TTM_PFX "    io_size: %ld\n", man->io_size);
+       printk(KERN_ERR TTM_PFX "    size: %llu\n", man->size);
+       printk(KERN_ERR TTM_PFX "    available_caching: 0x%08X\n",
+               man->available_caching);
+       printk(KERN_ERR TTM_PFX "    default_caching: 0x%08X\n",
+               man->default_caching);
+       if (mem_type != TTM_PL_SYSTEM) {
+               spin_lock(&bdev->glob->lru_lock);
+               drm_mm_debug_table(&man->manager, TTM_PFX);
+               spin_unlock(&bdev->glob->lru_lock);
+       }
+}
+
+static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
+                                       struct ttm_placement *placement)
+{
+       int i, ret, mem_type;
+
+       printk(KERN_ERR TTM_PFX "No space for %p (%lu pages, %luK, %luM)\n",
+               bo, bo->mem.num_pages, bo->mem.size >> 10,
+               bo->mem.size >> 20);
+       for (i = 0; i < placement->num_placement; i++) {
+               ret = ttm_mem_type_from_flags(placement->placement[i],
+                                               &mem_type);
+               if (ret)
+                       return;
+               printk(KERN_ERR TTM_PFX "  placement[%d]=0x%08X (%d)\n",
+                       i, placement->placement[i], mem_type);
+               ttm_mem_type_debug(bo->bdev, mem_type);
+       }
+}
+
+static ssize_t ttm_bo_global_show(struct kobject *kobj,
+                                 struct attribute *attr,
+                                 char *buffer)
+{
+       struct ttm_bo_global *glob =
+               container_of(kobj, struct ttm_bo_global, kobj);
+
+       return snprintf(buffer, PAGE_SIZE, "%lu\n",
+                       (unsigned long) atomic_read(&glob->bo_count));
+}
+
+static struct attribute *ttm_bo_global_attrs[] = {
+       &ttm_bo_count,
+       NULL
+};
+
+static struct sysfs_ops ttm_bo_global_ops = {
+       .show = &ttm_bo_global_show
+};
+
+static struct kobj_type ttm_bo_glob_kobj_type  = {
+       .release = &ttm_bo_global_kobj_release,
+       .sysfs_ops = &ttm_bo_global_ops,
+       .default_attrs = ttm_bo_global_attrs
+};
+
 
 static inline uint32_t ttm_bo_type_flags(unsigned type)
 {
@@ -66,10 +160,11 @@ static void ttm_bo_release_list(struct kref *list_kref)
 
        if (bo->ttm)
                ttm_tt_destroy(bo->ttm);
+       atomic_dec(&bo->glob->bo_count);
        if (bo->destroy)
                bo->destroy(bo);
        else {
-               ttm_mem_global_free(bdev->mem_glob, bo->acc_size, false);
+               ttm_mem_global_free(bdev->glob->mem_glob, bo->acc_size);
                kfree(bo);
        }
 }
@@ -83,12 +178,13 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
                ret = wait_event_interruptible(bo->event_queue,
                                               atomic_read(&bo->reserved) == 0);
                if (unlikely(ret != 0))
-                       return -ERESTART;
+                       return ret;
        } else {
                wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
        }
        return 0;
 }
+EXPORT_SYMBOL(ttm_bo_wait_unreserved);
 
 static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
 {
@@ -106,7 +202,7 @@ static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
                kref_get(&bo->list_kref);
 
                if (bo->ttm != NULL) {
-                       list_add_tail(&bo->swap, &bdev->swap_lru);
+                       list_add_tail(&bo->swap, &bo->glob->swap_lru);
                        kref_get(&bo->list_kref);
                }
        }
@@ -141,7 +237,7 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
                          bool interruptible,
                          bool no_wait, bool use_sequence, uint32_t sequence)
 {
-       struct ttm_bo_device *bdev = bo->bdev;
+       struct ttm_bo_global *glob = bo->glob;
        int ret;
 
        while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
@@ -153,9 +249,9 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
                if (no_wait)
                        return -EBUSY;
 
-               spin_unlock(&bdev->lru_lock);
+               spin_unlock(&glob->lru_lock);
                ret = ttm_bo_wait_unreserved(bo, interruptible);
-               spin_lock(&bdev->lru_lock);
+               spin_lock(&glob->lru_lock);
 
                if (unlikely(ret))
                        return ret;
@@ -181,16 +277,16 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
                   bool interruptible,
                   bool no_wait, bool use_sequence, uint32_t sequence)
 {
-       struct ttm_bo_device *bdev = bo->bdev;
+       struct ttm_bo_global *glob = bo->glob;
        int put_count = 0;
        int ret;
 
-       spin_lock(&bdev->lru_lock);
+       spin_lock(&glob->lru_lock);
        ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
                                    sequence);
        if (likely(ret == 0))
                put_count = ttm_bo_del_from_lru(bo);
-       spin_unlock(&bdev->lru_lock);
+       spin_unlock(&glob->lru_lock);
 
        while (put_count--)
                kref_put(&bo->list_kref, ttm_bo_ref_bug);
@@ -200,23 +296,23 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
 
 void ttm_bo_unreserve(struct ttm_buffer_object *bo)
 {
-       struct ttm_bo_device *bdev = bo->bdev;
+       struct ttm_bo_global *glob = bo->glob;
 
-       spin_lock(&bdev->lru_lock);
+       spin_lock(&glob->lru_lock);
        ttm_bo_add_to_lru(bo);
        atomic_set(&bo->reserved, 0);
        wake_up_all(&bo->event_queue);
-       spin_unlock(&bdev->lru_lock);
+       spin_unlock(&glob->lru_lock);
 }
 EXPORT_SYMBOL(ttm_bo_unreserve);
 
 /*
  * Call with bo->mutex held.
  */
-
 static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
 {
        struct ttm_bo_device *bdev = bo->bdev;
+       struct ttm_bo_global *glob = bo->glob;
        int ret = 0;
        uint32_t page_flags = 0;
 
@@ -232,17 +328,18 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
                        page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
        case ttm_bo_type_kernel:
                bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
-                                       page_flags, bdev->dummy_read_page);
+                                       page_flags, glob->dummy_read_page);
                if (unlikely(bo->ttm == NULL))
                        ret = -ENOMEM;
                break;
        case ttm_bo_type_user:
                bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
                                        page_flags | TTM_PAGE_FLAG_USER,
-                                       bdev->dummy_read_page);
-               if (unlikely(bo->ttm == NULL))
+                                       glob->dummy_read_page);
+               if (unlikely(bo->ttm == NULL)) {
                        ret = -ENOMEM;
-               break;
+                       break;
+               }
 
                ret = ttm_tt_set_user(bo->ttm, current,
                                      bo->buffer_start, bo->num_pages);
@@ -293,14 +390,8 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
                }
 
                if (bo->mem.mem_type == TTM_PL_SYSTEM) {
-
-                       struct ttm_mem_reg *old_mem = &bo->mem;
-                       uint32_t save_flags = old_mem->placement;
-
-                       *old_mem = *mem;
+                       bo->mem = *mem;
                        mem->mm_node = NULL;
-                       ttm_flag_masked(&save_flags, mem->placement,
-                                       TTM_PL_MASK_MEMTYPE);
                        goto moved;
                }
 
@@ -335,7 +426,8 @@ moved:
                    bdev->man[bo->mem.mem_type].gpu_offset;
                bo->cur_placement = bo->mem.placement;
                spin_unlock(&bo->lock);
-       }
+       } else
+               bo->offset = 0;
 
        return 0;
 
@@ -360,6 +452,7 @@ out_err:
 static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 {
        struct ttm_bo_device *bdev = bo->bdev;
+       struct ttm_bo_global *glob = bo->glob;
        struct ttm_bo_driver *driver = bdev->driver;
        int ret;
 
@@ -371,7 +464,9 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 
                spin_unlock(&bo->lock);
 
-               spin_lock(&bdev->lru_lock);
+               spin_lock(&glob->lru_lock);
+               put_count = ttm_bo_del_from_lru(bo);
+
                ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
                BUG_ON(ret);
                if (bo->ttm)
@@ -379,31 +474,31 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 
                if (!list_empty(&bo->ddestroy)) {
                        list_del_init(&bo->ddestroy);
-                       kref_put(&bo->list_kref, ttm_bo_ref_bug);
+                       ++put_count;
                }
                if (bo->mem.mm_node) {
+                       bo->mem.mm_node->private = NULL;
                        drm_mm_put_block(bo->mem.mm_node);
                        bo->mem.mm_node = NULL;
                }
-               put_count = ttm_bo_del_from_lru(bo);
-               spin_unlock(&bdev->lru_lock);
+               spin_unlock(&glob->lru_lock);
 
                atomic_set(&bo->reserved, 0);
 
                while (put_count--)
-                       kref_put(&bo->list_kref, ttm_bo_release_list);
+                       kref_put(&bo->list_kref, ttm_bo_ref_bug);
 
                return 0;
        }
 
-       spin_lock(&bdev->lru_lock);
+       spin_lock(&glob->lru_lock);
        if (list_empty(&bo->ddestroy)) {
                void *sync_obj = bo->sync_obj;
                void *sync_obj_arg = bo->sync_obj_arg;
 
                kref_get(&bo->list_kref);
                list_add_tail(&bo->ddestroy, &bdev->ddestroy);
-               spin_unlock(&bdev->lru_lock);
+               spin_unlock(&glob->lru_lock);
                spin_unlock(&bo->lock);
 
                if (sync_obj)
@@ -413,7 +508,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
                ret = 0;
 
        } else {
-               spin_unlock(&bdev->lru_lock);
+               spin_unlock(&glob->lru_lock);
                spin_unlock(&bo->lock);
                ret = -EBUSY;
        }
@@ -428,11 +523,12 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 
 static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 {
+       struct ttm_bo_global *glob = bdev->glob;
        struct ttm_buffer_object *entry, *nentry;
        struct list_head *list, *next;
        int ret;
 
-       spin_lock(&bdev->lru_lock);
+       spin_lock(&glob->lru_lock);
        list_for_each_safe(list, next, &bdev->ddestroy) {
                entry = list_entry(list, struct ttm_buffer_object, ddestroy);
                nentry = NULL;
@@ -449,16 +545,16 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
                }
                kref_get(&entry->list_kref);
 
-               spin_unlock(&bdev->lru_lock);
+               spin_unlock(&glob->lru_lock);
                ret = ttm_bo_cleanup_refs(entry, remove_all);
                kref_put(&entry->list_kref, ttm_bo_release_list);
 
-               spin_lock(&bdev->lru_lock);
+               spin_lock(&glob->lru_lock);
                if (nentry) {
                        bool next_onlist = !list_empty(next);
-                       spin_unlock(&bdev->lru_lock);
+                       spin_unlock(&glob->lru_lock);
                        kref_put(&nentry->list_kref, ttm_bo_release_list);
-                       spin_lock(&bdev->lru_lock);
+                       spin_lock(&glob->lru_lock);
                        /*
                         * Someone might have raced us and removed the
                         * next entry from the list. We don't bother restarting
@@ -472,7 +568,7 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
                        break;
        }
        ret = !list_empty(&bdev->ddestroy);
-       spin_unlock(&bdev->lru_lock);
+       spin_unlock(&glob->lru_lock);
 
        return ret;
 }
@@ -517,23 +613,21 @@ void ttm_bo_unref(struct ttm_buffer_object **p_bo)
 }
 EXPORT_SYMBOL(ttm_bo_unref);
 
-static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
-                       bool interruptible, bool no_wait)
+static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
+                       bool no_wait)
 {
-       int ret = 0;
        struct ttm_bo_device *bdev = bo->bdev;
+       struct ttm_bo_global *glob = bo->glob;
        struct ttm_mem_reg evict_mem;
-       uint32_t proposed_placement;
-
-       if (bo->mem.mem_type != mem_type)
-               goto out;
+       struct ttm_placement placement;
+       int ret = 0;
 
        spin_lock(&bo->lock);
        ret = ttm_bo_wait(bo, false, interruptible, no_wait);
        spin_unlock(&bo->lock);
 
        if (unlikely(ret != 0)) {
-               if (ret != -ERESTART) {
+               if (ret != -ERESTARTSYS) {
                        printk(KERN_ERR TTM_PFX
                               "Failed to expire sync object before "
                               "buffer eviction.\n");
@@ -546,115 +640,165 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
        evict_mem = bo->mem;
        evict_mem.mm_node = NULL;
 
-       proposed_placement = bdev->driver->evict_flags(bo);
-
-       ret = ttm_bo_mem_space(bo, proposed_placement,
-                              &evict_mem, interruptible, no_wait);
-       if (unlikely(ret != 0 && ret != -ERESTART))
-               ret = ttm_bo_mem_space(bo, TTM_PL_FLAG_SYSTEM,
-                                      &evict_mem, interruptible, no_wait);
-
+       placement.fpfn = 0;
+       placement.lpfn = 0;
+       placement.num_placement = 0;
+       placement.num_busy_placement = 0;
+       bdev->driver->evict_flags(bo, &placement);
+       ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
+                               no_wait);
        if (ret) {
-               if (ret != -ERESTART)
+               if (ret != -ERESTARTSYS) {
                        printk(KERN_ERR TTM_PFX
                               "Failed to find memory space for "
                               "buffer 0x%p eviction.\n", bo);
+                       ttm_bo_mem_space_debug(bo, &placement);
+               }
                goto out;
        }
 
        ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
                                     no_wait);
        if (ret) {
-               if (ret != -ERESTART)
+               if (ret != -ERESTARTSYS)
                        printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
+               spin_lock(&glob->lru_lock);
+               if (evict_mem.mm_node) {
+                       evict_mem.mm_node->private = NULL;
+                       drm_mm_put_block(evict_mem.mm_node);
+                       evict_mem.mm_node = NULL;
+               }
+               spin_unlock(&glob->lru_lock);
                goto out;
        }
-
-       spin_lock(&bdev->lru_lock);
-       if (evict_mem.mm_node) {
-               drm_mm_put_block(evict_mem.mm_node);
-               evict_mem.mm_node = NULL;
-       }
-       spin_unlock(&bdev->lru_lock);
        bo->evicted = true;
 out:
        return ret;
 }
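
With this change evict_flags() fills in a struct ttm_placement instead of returning a flags word. A sketch of what a driver-side implementation might look like (the function and array names are hypothetical; the field names follow this patch):

        /* Hypothetical driver code, not part of this patch. */
        static uint32_t evict_placements[] = {
                TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
        };

        static void example_evict_flags(struct ttm_buffer_object *bo,
                                        struct ttm_placement *placement)
        {
                placement->fpfn = 0;
                placement->lpfn = 0;    /* 0 means "no upper bound" */
                placement->placement = evict_placements;
                placement->num_placement = 1;
                placement->busy_placement = evict_placements;
                placement->num_busy_placement = 1;
        }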
 
-/**
- * Repeatedly evict memory from the LRU for @mem_type until we create enough
- * space, or we've evicted everything and there isn't enough space.
- */
-static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
-                                 struct ttm_mem_reg *mem,
-                                 uint32_t mem_type,
-                                 bool interruptible, bool no_wait)
+static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
+                               uint32_t mem_type,
+                               bool interruptible, bool no_wait)
 {
-       struct drm_mm_node *node;
-       struct ttm_buffer_object *entry;
+       struct ttm_bo_global *glob = bdev->glob;
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
-       struct list_head *lru;
-       unsigned long num_pages = mem->num_pages;
-       int put_count = 0;
-       int ret;
-
-retry_pre_get:
-       ret = drm_mm_pre_get(&man->manager);
-       if (unlikely(ret != 0))
-               return ret;
+       struct ttm_buffer_object *bo;
+       int ret, put_count = 0;
 
-       spin_lock(&bdev->lru_lock);
-       do {
-               node = drm_mm_search_free(&man->manager, num_pages,
-                                         mem->page_alignment, 1);
-               if (node)
-                       break;
+retry:
+       spin_lock(&glob->lru_lock);
+       if (list_empty(&man->lru)) {
+               spin_unlock(&glob->lru_lock);
+               return -EBUSY;
+       }
 
-               lru = &man->lru;
-               if (list_empty(lru))
-                       break;
+       bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
+       kref_get(&bo->list_kref);
 
-               entry = list_first_entry(lru, struct ttm_buffer_object, lru);
-               kref_get(&entry->list_kref);
+       ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
 
-               ret =
-                   ttm_bo_reserve_locked(entry, interruptible, no_wait,
-                                         false, 0);
+       if (unlikely(ret == -EBUSY)) {
+               spin_unlock(&glob->lru_lock);
+               if (likely(!no_wait))
+                       ret = ttm_bo_wait_unreserved(bo, interruptible);
 
-               if (likely(ret == 0))
-                       put_count = ttm_bo_del_from_lru(entry);
+               kref_put(&bo->list_kref, ttm_bo_release_list);
 
-               spin_unlock(&bdev->lru_lock);
+               /*
+                * We *need* to retry after releasing the lru lock.
+                */
 
                if (unlikely(ret != 0))
                        return ret;
+               goto retry;
+       }
 
-               while (put_count--)
-                       kref_put(&entry->list_kref, ttm_bo_ref_bug);
+       put_count = ttm_bo_del_from_lru(bo);
+       spin_unlock(&glob->lru_lock);
 
-               ret = ttm_bo_evict(entry, mem_type, interruptible, no_wait);
+       BUG_ON(ret != 0);
 
-               ttm_bo_unreserve(entry);
+       while (put_count--)
+               kref_put(&bo->list_kref, ttm_bo_ref_bug);
 
-               kref_put(&entry->list_kref, ttm_bo_release_list);
-               if (ret)
-                       return ret;
+       ret = ttm_bo_evict(bo, interruptible, no_wait);
+       ttm_bo_unreserve(bo);
 
-               spin_lock(&bdev->lru_lock);
-       } while (1);
+       kref_put(&bo->list_kref, ttm_bo_release_list);
+       return ret;
+}
 
-       if (!node) {
-               spin_unlock(&bdev->lru_lock);
-               return -ENOMEM;
-       }
+static int ttm_bo_man_get_node(struct ttm_buffer_object *bo,
+                               struct ttm_mem_type_manager *man,
+                               struct ttm_placement *placement,
+                               struct ttm_mem_reg *mem,
+                               struct drm_mm_node **node)
+{
+       struct ttm_bo_global *glob = bo->glob;
+       unsigned long lpfn;
+       int ret;
 
-       node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment);
-       if (unlikely(!node)) {
-               spin_unlock(&bdev->lru_lock);
-               goto retry_pre_get;
-       }
+       lpfn = placement->lpfn;
+       if (!lpfn)
+               lpfn = man->size;
+       *node = NULL;
+       do {
+               ret = drm_mm_pre_get(&man->manager);
+               if (unlikely(ret))
+                       return ret;
 
-       spin_unlock(&bdev->lru_lock);
+               spin_lock(&glob->lru_lock);
+               *node = drm_mm_search_free_in_range(&man->manager,
+                                       mem->num_pages, mem->page_alignment,
+                                       placement->fpfn, lpfn, 1);
+               if (unlikely(*node == NULL)) {
+                       spin_unlock(&glob->lru_lock);
+                       return 0;
+               }
+               *node = drm_mm_get_block_atomic_range(*node, mem->num_pages,
+                                                       mem->page_alignment,
+                                                       placement->fpfn,
+                                                       lpfn);
+               spin_unlock(&glob->lru_lock);
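+               /*
+                * A NULL result here most likely means a racing thread
+                * consumed the spare nodes set aside by drm_mm_pre_get(),
+                * so loop around to pre-get and search again.
+                */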
+       } while (*node == NULL);
+       return 0;
+}
+
+/**
+ * Repeatedly evict memory from the LRU for @mem_type until we create enough
+ * space, or we've evicted everything and there isn't enough space.
+ */
+static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
+                                       uint32_t mem_type,
+                                       struct ttm_placement *placement,
+                                       struct ttm_mem_reg *mem,
+                                       bool interruptible, bool no_wait)
+{
+       struct ttm_bo_device *bdev = bo->bdev;
+       struct ttm_bo_global *glob = bdev->glob;
+       struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+       struct drm_mm_node *node;
+       int ret;
+
+       do {
+               ret = ttm_bo_man_get_node(bo, man, placement, mem, &node);
+               if (unlikely(ret != 0))
+                       return ret;
+               if (node)
+                       break;
+               spin_lock(&glob->lru_lock);
+               if (list_empty(&man->lru)) {
+                       spin_unlock(&glob->lru_lock);
+                       break;
+               }
+               spin_unlock(&glob->lru_lock);
+               ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
+                                               no_wait);
+               if (unlikely(ret != 0))
+                       return ret;
+       } while (1);
+       if (node == NULL)
+               return -ENOMEM;
        mem->mm_node = node;
        mem->mem_type = mem_type;
        return 0;
@@ -685,7 +829,6 @@ static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
        return result;
 }
 
-
 static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
                                 bool disallow_fixed,
                                 uint32_t mem_type,
@@ -718,65 +861,55 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
  * space.
  */
 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
-                    uint32_t proposed_placement,
-                    struct ttm_mem_reg *mem,
-                    bool interruptible, bool no_wait)
+                       struct ttm_placement *placement,
+                       struct ttm_mem_reg *mem,
+                       bool interruptible, bool no_wait)
 {
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man;
-
-       uint32_t num_prios = bdev->driver->num_mem_type_prio;
-       const uint32_t *prios = bdev->driver->mem_type_prio;
-       uint32_t i;
        uint32_t mem_type = TTM_PL_SYSTEM;
        uint32_t cur_flags = 0;
        bool type_found = false;
        bool type_ok = false;
-       bool has_eagain = false;
+       bool has_erestartsys = false;
        struct drm_mm_node *node = NULL;
-       int ret;
+       int i, ret;
 
        mem->mm_node = NULL;
-       for (i = 0; i < num_prios; ++i) {
-               mem_type = prios[i];
+       for (i = 0; i < placement->num_placement; ++i) {
+               ret = ttm_mem_type_from_flags(placement->placement[i],
+                                               &mem_type);
+               if (ret)
+                       return ret;
                man = &bdev->man[mem_type];
 
                type_ok = ttm_bo_mt_compatible(man,
-                                              bo->type == ttm_bo_type_user,
-                                              mem_type, proposed_placement,
-                                              &cur_flags);
+                                               bo->type == ttm_bo_type_user,
+                                               mem_type,
+                                               placement->placement[i],
+                                               &cur_flags);
 
                if (!type_ok)
                        continue;
 
                cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
                                                  cur_flags);
+               /*
+                * Carry the access and other non-mapping-related flag bits
+                * from the memory placement flags over to the current flags.
+                */
+               ttm_flag_masked(&cur_flags, placement->placement[i],
+                               ~TTM_PL_MASK_MEMTYPE);
 
                if (mem_type == TTM_PL_SYSTEM)
                        break;
 
                if (man->has_type && man->use_type) {
                        type_found = true;
-                       do {
-                               ret = drm_mm_pre_get(&man->manager);
-                               if (unlikely(ret))
-                                       return ret;
-
-                               spin_lock(&bdev->lru_lock);
-                               node = drm_mm_search_free(&man->manager,
-                                                         mem->num_pages,
-                                                         mem->page_alignment,
-                                                         1);
-                               if (unlikely(!node)) {
-                                       spin_unlock(&bdev->lru_lock);
-                                       break;
-                               }
-                               node = drm_mm_get_block_atomic(node,
-                                                              mem->num_pages,
-                                                              mem->
-                                                              page_alignment);
-                               spin_unlock(&bdev->lru_lock);
-                       } while (!node);
+                       ret = ttm_bo_man_get_node(bo, man, placement, mem,
+                                                       &node);
+                       if (unlikely(ret))
+                               return ret;
                }
                if (node)
                        break;
@@ -786,69 +919,68 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                mem->mm_node = node;
                mem->mem_type = mem_type;
                mem->placement = cur_flags;
+               if (node)
+                       node->private = bo;
                return 0;
        }
 
        if (!type_found)
                return -EINVAL;
 
-       num_prios = bdev->driver->num_mem_busy_prio;
-       prios = bdev->driver->mem_busy_prio;
-
-       for (i = 0; i < num_prios; ++i) {
-               mem_type = prios[i];
+       for (i = 0; i < placement->num_busy_placement; ++i) {
+               ret = ttm_mem_type_from_flags(placement->busy_placement[i],
+                                               &mem_type);
+               if (ret)
+                       return ret;
                man = &bdev->man[mem_type];
-
                if (!man->has_type)
                        continue;
-
                if (!ttm_bo_mt_compatible(man,
-                                         bo->type == ttm_bo_type_user,
-                                         mem_type,
-                                         proposed_placement, &cur_flags))
+                                               bo->type == ttm_bo_type_user,
+                                               mem_type,
+                                               placement->busy_placement[i],
+                                               &cur_flags))
                        continue;
 
                cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
                                                  cur_flags);
+               /*
+                * Carry the access and other non-mapping-related flag bits
+                * from the memory placement flags over to the current flags.
+                */
+               ttm_flag_masked(&cur_flags, placement->busy_placement[i],
+                               ~TTM_PL_MASK_MEMTYPE);
 
-               ret = ttm_bo_mem_force_space(bdev, mem, mem_type,
-                                            interruptible, no_wait);
-
+               ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
+                                               interruptible, no_wait);
                if (ret == 0 && mem->mm_node) {
                        mem->placement = cur_flags;
+                       mem->mm_node->private = bo;
                        return 0;
                }
-
-               if (ret == -ERESTART)
-                       has_eagain = true;
+               if (ret == -ERESTARTSYS)
+                       has_erestartsys = true;
        }
-
-       ret = (has_eagain) ? -ERESTART : -ENOMEM;
+       ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
        return ret;
 }
 EXPORT_SYMBOL(ttm_bo_mem_space);
 
 int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
 {
-       int ret = 0;
-
        if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
                return -EBUSY;
 
-       ret = wait_event_interruptible(bo->event_queue,
-                                      atomic_read(&bo->cpu_writers) == 0);
-
-       if (ret == -ERESTARTSYS)
-               ret = -ERESTART;
-
-       return ret;
+       return wait_event_interruptible(bo->event_queue,
+                                       atomic_read(&bo->cpu_writers) == 0);
 }
+EXPORT_SYMBOL(ttm_bo_wait_cpu);
 
 int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
-                      uint32_t proposed_placement,
-                      bool interruptible, bool no_wait)
+                       struct ttm_placement *placement,
+                       bool interruptible, bool no_wait)
 {
-       struct ttm_bo_device *bdev = bo->bdev;
+       struct ttm_bo_global *glob = bo->glob;
        int ret = 0;
        struct ttm_mem_reg mem;
 
@@ -859,147 +991,132 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
         * Have the driver move function wait for idle when necessary,
         * instead of doing it here.
         */
-
        spin_lock(&bo->lock);
        ret = ttm_bo_wait(bo, false, interruptible, no_wait);
        spin_unlock(&bo->lock);
-
        if (ret)
                return ret;
-
        mem.num_pages = bo->num_pages;
        mem.size = mem.num_pages << PAGE_SHIFT;
        mem.page_alignment = bo->mem.page_alignment;
-
        /*
         * Determine where to move the buffer.
         */
-
-       ret = ttm_bo_mem_space(bo, proposed_placement, &mem,
-                              interruptible, no_wait);
+       ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait);
        if (ret)
                goto out_unlock;
-
        ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
-
 out_unlock:
        if (ret && mem.mm_node) {
-               spin_lock(&bdev->lru_lock);
+               spin_lock(&glob->lru_lock);
+               mem.mm_node->private = NULL;
                drm_mm_put_block(mem.mm_node);
-               spin_unlock(&bdev->lru_lock);
+               spin_unlock(&glob->lru_lock);
        }
        return ret;
 }
 
-static int ttm_bo_mem_compat(uint32_t proposed_placement,
+static int ttm_bo_mem_compat(struct ttm_placement *placement,
                             struct ttm_mem_reg *mem)
 {
-       if ((proposed_placement & mem->placement & TTM_PL_MASK_MEM) == 0)
-               return 0;
-       if ((proposed_placement & mem->placement & TTM_PL_MASK_CACHING) == 0)
-               return 0;
-
-       return 1;
+       int i;
+
+       for (i = 0; i < placement->num_placement; i++) {
+               if ((placement->placement[i] & mem->placement &
+                       TTM_PL_MASK_CACHING) &&
+                       (placement->placement[i] & mem->placement &
+                       TTM_PL_MASK_MEM))
+                       return i;
+       }
+       return -1;
 }
 
-int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
-                              uint32_t proposed_placement,
-                              bool interruptible, bool no_wait)
+int ttm_bo_validate(struct ttm_buffer_object *bo,
+                       struct ttm_placement *placement,
+                       bool interruptible, bool no_wait)
 {
        int ret;
 
        BUG_ON(!atomic_read(&bo->reserved));
-       bo->proposed_placement = proposed_placement;
-
-       TTM_DEBUG("Proposed placement 0x%08lx, Old flags 0x%08lx\n",
-                 (unsigned long)proposed_placement,
-                 (unsigned long)bo->mem.placement);
-
+       /* Check that the requested range is valid */
+       if (placement->lpfn || placement->fpfn)
+               if (placement->fpfn > placement->lpfn ||
+                       (placement->lpfn - placement->fpfn) < bo->num_pages)
+                       return -EINVAL;
        /*
         * Check whether we need to move buffer.
         */
-
-       if (!ttm_bo_mem_compat(bo->proposed_placement, &bo->mem)) {
-               ret = ttm_bo_move_buffer(bo, bo->proposed_placement,
-                                        interruptible, no_wait);
-               if (ret) {
-                       if (ret != -ERESTART)
-                               printk(KERN_ERR TTM_PFX
-                                      "Failed moving buffer. "
-                                      "Proposed placement 0x%08x\n",
-                                      bo->proposed_placement);
-                       if (ret == -ENOMEM)
-                               printk(KERN_ERR TTM_PFX
-                                      "Out of aperture space or "
-                                      "DRM memory quota.\n");
+       ret = ttm_bo_mem_compat(placement, &bo->mem);
+       if (ret < 0) {
+               ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait);
+               if (ret)
                        return ret;
-               }
+       } else {
+               /*
+                * Use the access and other non-mapping-related flag bits from
+                * Carry the access and other non-mapping-related flag bits
+                * from the compatible memory placement flags over to the
+                * active flags.
+               ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
+                               ~TTM_PL_MASK_MEMTYPE);
        }
-
        /*
         * We might need to add a TTM.
         */
-
        if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
                ret = ttm_bo_add_ttm(bo, true);
                if (ret)
                        return ret;
        }
-       /*
-        * Validation has succeeded, move the access and other
-        * non-mapping-related flag bits from the proposed flags to
-        * the active flags
-        */
-
-       ttm_flag_masked(&bo->mem.placement, bo->proposed_placement,
-                       ~TTM_PL_MASK_MEMTYPE);
-
        return 0;
 }
-EXPORT_SYMBOL(ttm_buffer_object_validate);
+EXPORT_SYMBOL(ttm_bo_validate);
 
-int
-ttm_bo_check_placement(struct ttm_buffer_object *bo,
-                      uint32_t set_flags, uint32_t clr_flags)
+int ttm_bo_check_placement(struct ttm_buffer_object *bo,
+                               struct ttm_placement *placement)
 {
-       uint32_t new_mask = set_flags | clr_flags;
-
-       if ((bo->type == ttm_bo_type_user) &&
-           (clr_flags & TTM_PL_FLAG_CACHED)) {
-               printk(KERN_ERR TTM_PFX
-                      "User buffers require cache-coherent memory.\n");
-               return -EINVAL;
-       }
-
-       if (!capable(CAP_SYS_ADMIN)) {
-               if (new_mask & TTM_PL_FLAG_NO_EVICT) {
-                       printk(KERN_ERR TTM_PFX "Need to be root to modify"
-                              " NO_EVICT status.\n");
+       int i;
+
+       if (placement->fpfn || placement->lpfn) {
+               if (bo->mem.num_pages > (placement->lpfn - placement->fpfn)) {
+                       printk(KERN_ERR TTM_PFX "Page number range too small: "
+                               "need %lu pages, range is [%u, %u]\n",
+                               bo->mem.num_pages, placement->fpfn,
+                               placement->lpfn);
                        return -EINVAL;
                }
-
-               if ((clr_flags & bo->mem.placement & TTM_PL_MASK_MEMTYPE) &&
-                   (bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
-                       printk(KERN_ERR TTM_PFX
-                              "Incompatible memory specification"
-                              " for NO_EVICT buffer.\n");
-                       return -EINVAL;
+       }
+       for (i = 0; i < placement->num_placement; i++) {
+               if (!capable(CAP_SYS_ADMIN)) {
+                       if (placement->placement[i] & TTM_PL_FLAG_NO_EVICT) {
+                               printk(KERN_ERR TTM_PFX "Need to be root to "
+                                       "modify NO_EVICT status.\n");
+                               return -EINVAL;
+                       }
+               }
+       }
+       for (i = 0; i < placement->num_busy_placement; i++) {
+               if (!capable(CAP_SYS_ADMIN)) {
+                       if (placement->busy_placement[i] & TTM_PL_FLAG_NO_EVICT) {
+                               printk(KERN_ERR TTM_PFX "Need to be root to "
+                                       "modify NO_EVICT status.\n");
+                               return -EINVAL;
+                       }
                }
        }
        return 0;
 }
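
The fpfn/lpfn window check above can be restated on its own. A minimal sketch with the same semantics (hypothetical helper, not part of this patch):

        /* Hypothetical restatement of the range check in
         * ttm_bo_check_placement(). */
        static bool ttm_range_fits(unsigned long num_pages,
                                   unsigned fpfn, unsigned lpfn)
        {
                if (!fpfn && !lpfn)
                        return true;    /* no range restriction requested */
                return num_pages <= (unsigned long)(lpfn - fpfn);
        }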
 
-int ttm_buffer_object_init(struct ttm_bo_device *bdev,
-                          struct ttm_buffer_object *bo,
-                          unsigned long size,
-                          enum ttm_bo_type type,
-                          uint32_t flags,
-                          uint32_t page_alignment,
-                          unsigned long buffer_start,
-                          bool interruptible,
-                          struct file *persistant_swap_storage,
-                          size_t acc_size,
-                          void (*destroy) (struct ttm_buffer_object *))
+int ttm_bo_init(struct ttm_bo_device *bdev,
+               struct ttm_buffer_object *bo,
+               unsigned long size,
+               enum ttm_bo_type type,
+               struct ttm_placement *placement,
+               uint32_t page_alignment,
+               unsigned long buffer_start,
+               bool interruptible,
+               struct file *persistant_swap_storage,
+               size_t acc_size,
+               void (*destroy) (struct ttm_buffer_object *))
 {
        int ret = 0;
        unsigned long num_pages;
@@ -1022,8 +1139,10 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
        INIT_LIST_HEAD(&bo->ddestroy);
        INIT_LIST_HEAD(&bo->swap);
        bo->bdev = bdev;
+       bo->glob = bdev->glob;
        bo->type = type;
        bo->num_pages = num_pages;
+       bo->mem.size = num_pages << PAGE_SHIFT;
        bo->mem.mem_type = TTM_PL_SYSTEM;
        bo->mem.num_pages = bo->num_pages;
        bo->mem.mm_node = NULL;
@@ -1034,30 +1153,23 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
        bo->seq_valid = false;
        bo->persistant_swap_storage = persistant_swap_storage;
        bo->acc_size = acc_size;
+       atomic_inc(&bo->glob->bo_count);
 
-       ret = ttm_bo_check_placement(bo, flags, 0ULL);
+       ret = ttm_bo_check_placement(bo, placement);
        if (unlikely(ret != 0))
                goto out_err;
 
-       /*
-        * If no caching attributes are set, accept any form of caching.
-        */
-
-       if ((flags & TTM_PL_MASK_CACHING) == 0)
-               flags |= TTM_PL_MASK_CACHING;
-
        /*
         * For ttm_bo_type_device buffers, allocate
         * address space from the device.
         */
-
        if (bo->type == ttm_bo_type_device) {
                ret = ttm_bo_setup_vm(bo);
                if (ret)
                        goto out_err;
        }
 
-       ret = ttm_buffer_object_validate(bo, flags, interruptible, false);
+       ret = ttm_bo_validate(bo, placement, interruptible, false);
        if (ret)
                goto out_err;
 
@@ -1070,125 +1182,93 @@ out_err:
 
        return ret;
 }
-EXPORT_SYMBOL(ttm_buffer_object_init);
+EXPORT_SYMBOL(ttm_bo_init);
 
-static inline size_t ttm_bo_size(struct ttm_bo_device *bdev,
+static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
                                 unsigned long num_pages)
 {
        size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
            PAGE_MASK;
 
-       return bdev->ttm_bo_size + 2 * page_array_size;
+       return glob->ttm_bo_size + 2 * page_array_size;
 }
 
-int ttm_buffer_object_create(struct ttm_bo_device *bdev,
-                            unsigned long size,
-                            enum ttm_bo_type type,
-                            uint32_t flags,
-                            uint32_t page_alignment,
-                            unsigned long buffer_start,
-                            bool interruptible,
-                            struct file *persistant_swap_storage,
-                            struct ttm_buffer_object **p_bo)
+int ttm_bo_create(struct ttm_bo_device *bdev,
+                       unsigned long size,
+                       enum ttm_bo_type type,
+                       struct ttm_placement *placement,
+                       uint32_t page_alignment,
+                       unsigned long buffer_start,
+                       bool interruptible,
+                       struct file *persistant_swap_storage,
+                       struct ttm_buffer_object **p_bo)
 {
        struct ttm_buffer_object *bo;
+       struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
        int ret;
-       struct ttm_mem_global *mem_glob = bdev->mem_glob;
 
        size_t acc_size =
-           ttm_bo_size(bdev, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
-       ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false);
+           ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
+       ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
        if (unlikely(ret != 0))
                return ret;
 
        bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 
        if (unlikely(bo == NULL)) {
-               ttm_mem_global_free(mem_glob, acc_size, false);
+               ttm_mem_global_free(mem_glob, acc_size);
                return -ENOMEM;
        }
 
-       ret = ttm_buffer_object_init(bdev, bo, size, type, flags,
-                                    page_alignment, buffer_start,
-                                    interruptible,
-                                    persistant_swap_storage, acc_size, NULL);
+       ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
+                               buffer_start, interruptible,
+                               persistant_swap_storage, acc_size, NULL);
        if (likely(ret == 0))
                *p_bo = bo;
 
        return ret;
 }
 
-static int ttm_bo_leave_list(struct ttm_buffer_object *bo,
-                            uint32_t mem_type, bool allow_errors)
-{
-       int ret;
-
-       spin_lock(&bo->lock);
-       ret = ttm_bo_wait(bo, false, false, false);
-       spin_unlock(&bo->lock);
-
-       if (ret && allow_errors)
-               goto out;
-
-       if (bo->mem.mem_type == mem_type)
-               ret = ttm_bo_evict(bo, mem_type, false, false);
-
-       if (ret) {
-               if (allow_errors) {
-                       goto out;
-               } else {
-                       ret = 0;
-                       printk(KERN_ERR TTM_PFX "Cleanup eviction failed\n");
-               }
-       }
-
-out:
-       return ret;
-}
-
 static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
-                                  struct list_head *head,
-                                  unsigned mem_type, bool allow_errors)
+                                       unsigned mem_type, bool allow_errors)
 {
-       struct ttm_buffer_object *entry;
+       struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+       struct ttm_bo_global *glob = bdev->glob;
        int ret;
-       int put_count;
 
        /*
         * Can't use standard list traversal since we're unlocking.
         */
 
-       spin_lock(&bdev->lru_lock);
-
-       while (!list_empty(head)) {
-               entry = list_first_entry(head, struct ttm_buffer_object, lru);
-               kref_get(&entry->list_kref);
-               ret = ttm_bo_reserve_locked(entry, false, false, false, 0);
-               put_count = ttm_bo_del_from_lru(entry);
-               spin_unlock(&bdev->lru_lock);
-               while (put_count--)
-                       kref_put(&entry->list_kref, ttm_bo_ref_bug);
-               BUG_ON(ret);
-               ret = ttm_bo_leave_list(entry, mem_type, allow_errors);
-               ttm_bo_unreserve(entry);
-               kref_put(&entry->list_kref, ttm_bo_release_list);
-               spin_lock(&bdev->lru_lock);
+       spin_lock(&glob->lru_lock);
+       while (!list_empty(&man->lru)) {
+               spin_unlock(&glob->lru_lock);
+               ret = ttm_mem_evict_first(bdev, mem_type, false, false);
+               if (ret) {
+                       if (allow_errors) {
+                               return ret;
+                       } else {
+                               printk(KERN_ERR TTM_PFX
+                                       "Cleanup eviction failed\n");
+                       }
+               }
+               spin_lock(&glob->lru_lock);
        }
-
-       spin_unlock(&bdev->lru_lock);
-
+       spin_unlock(&glob->lru_lock);
        return 0;
 }
 
 int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
 {
-       struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+       struct ttm_bo_global *glob = bdev->glob;
+       struct ttm_mem_type_manager *man;
        int ret = -EINVAL;
 
        if (mem_type >= TTM_NUM_MEM_TYPES) {
                printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type);
                return ret;
        }
+       man = &bdev->man[mem_type];
 
        if (!man->has_type) {
                printk(KERN_ERR TTM_PFX "Trying to take down uninitialized "
@@ -1201,15 +1281,15 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
 
        ret = 0;
        if (mem_type > 0) {
-               ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false);
+               ttm_bo_force_list_clean(bdev, mem_type, false);
 
-               spin_lock(&bdev->lru_lock);
+               spin_lock(&glob->lru_lock);
                if (drm_mm_clean(&man->manager))
                        drm_mm_takedown(&man->manager);
                else
                        ret = -EBUSY;
 
-               spin_unlock(&bdev->lru_lock);
+               spin_unlock(&glob->lru_lock);
        }
 
        return ret;
@@ -1234,12 +1314,12 @@ int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
                return 0;
        }
 
-       return ttm_bo_force_list_clean(bdev, &man->lru, mem_type, true);
+       return ttm_bo_force_list_clean(bdev, mem_type, true);
 }
 EXPORT_SYMBOL(ttm_bo_evict_mm);
 
 int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
-                  unsigned long p_offset, unsigned long p_size)
+                       unsigned long p_size)
 {
        int ret = -EINVAL;
        struct ttm_mem_type_manager *man;
@@ -1269,7 +1349,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
                               type);
                        return ret;
                }
-               ret = drm_mm_init(&man->manager, p_offset, p_size);
+               ret = drm_mm_init(&man->manager, 0, p_size);
                if (ret)
                        return ret;
        }
@@ -1283,11 +1363,82 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
 }
 EXPORT_SYMBOL(ttm_bo_init_mm);
 
+static void ttm_bo_global_kobj_release(struct kobject *kobj)
+{
+       struct ttm_bo_global *glob =
+               container_of(kobj, struct ttm_bo_global, kobj);
+
+       ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
+       __free_page(glob->dummy_read_page);
+       kfree(glob);
+}
+
+void ttm_bo_global_release(struct ttm_global_reference *ref)
+{
+       struct ttm_bo_global *glob = ref->object;
+
+       kobject_del(&glob->kobj);
+       kobject_put(&glob->kobj);
+}
+EXPORT_SYMBOL(ttm_bo_global_release);
+
+int ttm_bo_global_init(struct ttm_global_reference *ref)
+{
+       struct ttm_bo_global_ref *bo_ref =
+               container_of(ref, struct ttm_bo_global_ref, ref);
+       struct ttm_bo_global *glob = ref->object;
+       int ret;
+
+       mutex_init(&glob->device_list_mutex);
+       spin_lock_init(&glob->lru_lock);
+       glob->mem_glob = bo_ref->mem_glob;
+       glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
+
+       if (unlikely(glob->dummy_read_page == NULL)) {
+               ret = -ENOMEM;
+               goto out_no_drp;
+       }
+
+       INIT_LIST_HEAD(&glob->swap_lru);
+       INIT_LIST_HEAD(&glob->device_list);
+
+       ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
+       ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
+       if (unlikely(ret != 0)) {
+               printk(KERN_ERR TTM_PFX
+                      "Could not register buffer object swapout.\n");
+               goto out_no_shrink;
+       }
+
+       glob->ttm_bo_extra_size =
+               ttm_round_pot(sizeof(struct ttm_tt)) +
+               ttm_round_pot(sizeof(struct ttm_backend));
+
+       glob->ttm_bo_size = glob->ttm_bo_extra_size +
+               ttm_round_pot(sizeof(struct ttm_buffer_object));
+
+       atomic_set(&glob->bo_count, 0);
+
+       kobject_init(&glob->kobj, &ttm_bo_glob_kobj_type);
+       ret = kobject_add(&glob->kobj, ttm_get_kobj(), "buffer_objects");
+       if (unlikely(ret != 0))
+               kobject_put(&glob->kobj);
+       return ret;
+out_no_shrink:
+       __free_page(glob->dummy_read_page);
+out_no_drp:
+       kfree(glob);
+       return ret;
+}
+EXPORT_SYMBOL(ttm_bo_global_init);
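
For context, a driver would pull in this shared state through the existing ttm_global machinery, roughly as sketched below (this mirrors the pattern radeon uses; identifiers not in this diff, such as TTM_GLOBAL_TTM_BO and ttm_global_item_ref(), are assumptions):

        /* Sketch only; names outside this diff are assumptions. */
        static struct ttm_bo_global_ref bo_ref;

        static int example_bo_global_setup(struct ttm_mem_global *mem_glob)
        {
                struct ttm_global_reference *ref = &bo_ref.ref;

                bo_ref.mem_glob = mem_glob;
                ref->global_type = TTM_GLOBAL_TTM_BO;
                ref->size = sizeof(struct ttm_bo_global);
                ref->init = &ttm_bo_global_init;
                ref->release = &ttm_bo_global_release;
                return ttm_global_item_ref(ref);
        }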
+
 int ttm_bo_device_release(struct ttm_bo_device *bdev)
 {
        int ret = 0;
        unsigned i = TTM_NUM_MEM_TYPES;
        struct ttm_mem_type_manager *man;
+       struct ttm_bo_global *glob = bdev->glob;
 
        while (i--) {
                man = &bdev->man[i];
@@ -1303,100 +1454,74 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
                }
        }
 
+       mutex_lock(&glob->device_list_mutex);
+       list_del(&bdev->device_list);
+       mutex_unlock(&glob->device_list_mutex);
+
        if (!cancel_delayed_work(&bdev->wq))
                flush_scheduled_work();
 
        while (ttm_bo_delayed_delete(bdev, true))
                ;
 
-       spin_lock(&bdev->lru_lock);
+       spin_lock(&glob->lru_lock);
        if (list_empty(&bdev->ddestroy))
                TTM_DEBUG("Delayed destroy list was clean\n");
 
        if (list_empty(&bdev->man[0].lru))
                TTM_DEBUG("Swap list was clean\n");
-       spin_unlock(&bdev->lru_lock);
+       spin_unlock(&glob->lru_lock);
 
-       ttm_mem_unregister_shrink(bdev->mem_glob, &bdev->shrink);
        BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
        write_lock(&bdev->vm_lock);
        drm_mm_takedown(&bdev->addr_space_mm);
        write_unlock(&bdev->vm_lock);
 
-       __free_page(bdev->dummy_read_page);
        return ret;
 }
 EXPORT_SYMBOL(ttm_bo_device_release);
 
-/*
- * This function is intended to be called on drm driver load.
- * If you decide to call it from firstopen, you must protect the call
- * from a potentially racing ttm_bo_driver_finish in lastclose.
- * (This may happen on X server restart).
- */
-
 int ttm_bo_device_init(struct ttm_bo_device *bdev,
-                      struct ttm_mem_global *mem_glob,
-                      struct ttm_bo_driver *driver, uint64_t file_page_offset,
+                      struct ttm_bo_global *glob,
+                      struct ttm_bo_driver *driver,
+                      uint64_t file_page_offset,
                       bool need_dma32)
 {
        int ret = -EINVAL;
 
-       bdev->dummy_read_page = NULL;
        rwlock_init(&bdev->vm_lock);
-       spin_lock_init(&bdev->lru_lock);
-
        bdev->driver = driver;
-       bdev->mem_glob = mem_glob;
 
        memset(bdev->man, 0, sizeof(bdev->man));
 
-       bdev->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
-       if (unlikely(bdev->dummy_read_page == NULL)) {
-               ret = -ENOMEM;
-               goto out_err0;
-       }
-
        /*
         * Initialize the system memory buffer type.
         * Other types need to be driver / IOCTL initialized.
         */
-       ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0);
+       ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
        if (unlikely(ret != 0))
-               goto out_err1;
+               goto out_no_sys;
 
        bdev->addr_space_rb = RB_ROOT;
        ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
        if (unlikely(ret != 0))
-               goto out_err2;
+               goto out_no_addr_mm;
 
        INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
        bdev->nice_mode = true;
        INIT_LIST_HEAD(&bdev->ddestroy);
-       INIT_LIST_HEAD(&bdev->swap_lru);
        bdev->dev_mapping = NULL;
+       bdev->glob = glob;
        bdev->need_dma32 = need_dma32;
-       ttm_mem_init_shrink(&bdev->shrink, ttm_bo_swapout);
-       ret = ttm_mem_register_shrink(mem_glob, &bdev->shrink);
-       if (unlikely(ret != 0)) {
-               printk(KERN_ERR TTM_PFX
-                      "Could not register buffer object swapout.\n");
-               goto out_err2;
-       }
-
-       bdev->ttm_bo_extra_size =
-               ttm_round_pot(sizeof(struct ttm_tt)) +
-               ttm_round_pot(sizeof(struct ttm_backend));
 
-       bdev->ttm_bo_size = bdev->ttm_bo_extra_size +
-               ttm_round_pot(sizeof(struct ttm_buffer_object));
+       mutex_lock(&glob->device_list_mutex);
+       list_add_tail(&bdev->device_list, &glob->device_list);
+       mutex_unlock(&glob->device_list_mutex);
 
        return 0;
-out_err2:
+out_no_addr_mm:
        ttm_bo_clean_mm(bdev, 0);
-out_err1:
-       __free_page(bdev->dummy_read_page);
-out_err0:
+out_no_sys:
        return ret;
 }
 EXPORT_SYMBOL(ttm_bo_device_init);
@@ -1575,6 +1700,10 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
                        driver->sync_obj_unref(&sync_obj);
                        driver->sync_obj_unref(&tmp_obj);
                        spin_lock(&bo->lock);
+               } else {
+                       spin_unlock(&bo->lock);
+                       driver->sync_obj_unref(&sync_obj);
+                       spin_lock(&bo->lock);
                }
        }
        return 0;
@@ -1599,7 +1728,7 @@ int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
                        ret = wait_event_interruptible
                            (bo->event_queue, atomic_read(&bo->reserved) == 0);
                        if (unlikely(ret != 0))
-                               return -ERESTART;
+                               return ret;
                } else {
                        wait_event(bo->event_queue,
                                   atomic_read(&bo->reserved) == 0);
@@ -1628,12 +1757,14 @@ int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
        ttm_bo_unreserve(bo);
        return ret;
 }
+EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
 
 void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
 {
        if (atomic_dec_and_test(&bo->cpu_writers))
                wake_up_all(&bo->event_queue);
 }
+EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
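
With both halves of the synccpu API now exported, a driver-side caller would look roughly like this (hypothetical sketch):

        /* Hypothetical caller of the exported synccpu pair. */
        static int example_cpu_write(struct ttm_buffer_object *bo)
        {
                int ret;

                ret = ttm_bo_synccpu_write_grab(bo, false);
                if (ret)
                        return ret;
                /* ... CPU writes to the buffer contents go here ... */
                ttm_bo_synccpu_write_release(bo);
                return 0;
        }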
 
 /**
  * A buffer object shrink method that tries to swap out the first
@@ -1642,21 +1773,21 @@ void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
 
 static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 {
-       struct ttm_bo_device *bdev =
-           container_of(shrink, struct ttm_bo_device, shrink);
+       struct ttm_bo_global *glob =
+           container_of(shrink, struct ttm_bo_global, shrink);
        struct ttm_buffer_object *bo;
        int ret = -EBUSY;
        int put_count;
        uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
 
-       spin_lock(&bdev->lru_lock);
+       spin_lock(&glob->lru_lock);
        while (ret == -EBUSY) {
-               if (unlikely(list_empty(&bdev->swap_lru))) {
-                       spin_unlock(&bdev->lru_lock);
+               if (unlikely(list_empty(&glob->swap_lru))) {
+                       spin_unlock(&glob->lru_lock);
                        return -EBUSY;
                }
 
-               bo = list_first_entry(&bdev->swap_lru,
+               bo = list_first_entry(&glob->swap_lru,
                                      struct ttm_buffer_object, swap);
                kref_get(&bo->list_kref);
 
@@ -1668,16 +1799,16 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 
                ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
                if (unlikely(ret == -EBUSY)) {
-                       spin_unlock(&bdev->lru_lock);
+                       spin_unlock(&glob->lru_lock);
                        ttm_bo_wait_unreserved(bo, false);
                        kref_put(&bo->list_kref, ttm_bo_release_list);
-                       spin_lock(&bdev->lru_lock);
+                       spin_lock(&glob->lru_lock);
                }
        }
 
        BUG_ON(ret != 0);
        put_count = ttm_bo_del_from_lru(bo);
-       spin_unlock(&bdev->lru_lock);
+       spin_unlock(&glob->lru_lock);
 
        while (put_count--)
                kref_put(&bo->list_kref, ttm_bo_ref_bug);
@@ -1714,6 +1845,9 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
         * anyone tries to access a ttm page.
         */
 
+       if (bo->bdev->driver->swap_notify)
+               bo->bdev->driver->swap_notify(bo);
+
        ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
 out:
 
@@ -1731,6 +1865,7 @@ out:
 
 void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
 {
-       while (ttm_bo_swapout(&bdev->shrink) == 0)
+       while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
                ;
 }
+EXPORT_SYMBOL(ttm_bo_swapout_all);