drm/radeon/kms: Convert radeon to new TTM validation API (V2)
[linux-2.6.git] drivers/gpu/drm/radeon/radeon_ttm.c
/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include <linux/seq_file.h>
#include "radeon_reg.h"
#include "radeon.h"

#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)

static int radeon_ttm_debugfs_init(struct radeon_device *rdev);

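/*
 * radeon_get_rdev - walk back from the embedded ttm_bo_device to the
 * owning radeon_device, using two nested container_of() steps
 * (bdev lives inside struct radeon_mman, which lives inside
 * struct radeon_device).
 */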
static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
{
        struct radeon_mman *mman;
        struct radeon_device *rdev;

        mman = container_of(bdev, struct radeon_mman, bdev);
        rdev = container_of(mman, struct radeon_device, mman);
        return rdev;
}


/*
 * Global memory.
 */
static int radeon_ttm_mem_global_init(struct ttm_global_reference *ref)
{
        return ttm_mem_global_init(ref->object);
}

static void radeon_ttm_mem_global_release(struct ttm_global_reference *ref)
{
        ttm_mem_global_release(ref->object);
}

static int radeon_ttm_global_init(struct radeon_device *rdev)
{
        struct ttm_global_reference *global_ref;
        int r;

        rdev->mman.mem_global_referenced = false;
        global_ref = &rdev->mman.mem_global_ref;
        global_ref->global_type = TTM_GLOBAL_TTM_MEM;
        global_ref->size = sizeof(struct ttm_mem_global);
        global_ref->init = &radeon_ttm_mem_global_init;
        global_ref->release = &radeon_ttm_mem_global_release;
        r = ttm_global_item_ref(global_ref);
        if (r != 0) {
                DRM_ERROR("Failed setting up TTM memory accounting "
                          "subsystem.\n");
                return r;
        }

        rdev->mman.bo_global_ref.mem_glob =
                rdev->mman.mem_global_ref.object;
        global_ref = &rdev->mman.bo_global_ref.ref;
        global_ref->global_type = TTM_GLOBAL_TTM_BO;
        global_ref->size = sizeof(struct ttm_bo_global);
        global_ref->init = &ttm_bo_global_init;
        global_ref->release = &ttm_bo_global_release;
        r = ttm_global_item_ref(global_ref);
        if (r != 0) {
                DRM_ERROR("Failed setting up TTM BO subsystem.\n");
                ttm_global_item_unref(&rdev->mman.mem_global_ref);
                return r;
        }

        rdev->mman.mem_global_referenced = true;
        return 0;
}

static void radeon_ttm_global_fini(struct radeon_device *rdev)
{
        if (rdev->mman.mem_global_referenced) {
                ttm_global_item_unref(&rdev->mman.bo_global_ref.ref);
                ttm_global_item_unref(&rdev->mman.mem_global_ref);
                rdev->mman.mem_global_referenced = false;
        }
}

struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev);

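/*
 * Pick a TTM backend for GTT pages: the AGP backend when the device is
 * wired up through an AGP bridge, otherwise the driver's own GART-based
 * backend created by radeon_ttm_backend_create().
 */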
static struct ttm_backend*
radeon_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
        struct radeon_device *rdev;

        rdev = radeon_get_rdev(bdev);
#if __OS_HAS_AGP
        if (rdev->flags & RADEON_IS_AGP) {
                return ttm_agp_backend_init(bdev, rdev->ddev->agp->bridge);
        } else
#endif
        {
                return radeon_ttm_backend_create(rdev);
        }
}

static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
        return 0;
}

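/*
 * Describe the memory domains to TTM: SYSTEM (cacheable system pages),
 * TT (GART-bound pages, or the AGP aperture on AGP boards) and VRAM
 * (fixed, ioremapped on-card memory reached through the PCI aperture).
 */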
static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                                struct ttm_mem_type_manager *man)
{
        struct radeon_device *rdev;

        rdev = radeon_get_rdev(bdev);

        switch (type) {
        case TTM_PL_SYSTEM:
                /* System memory */
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_TT:
                man->gpu_offset = rdev->mc.gtt_location;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
#if __OS_HAS_AGP
                if (rdev->flags & RADEON_IS_AGP) {
                        if (!(drm_core_has_AGP(rdev->ddev) && rdev->ddev->agp)) {
                                DRM_ERROR("AGP is not enabled for memory type %u\n",
                                          (unsigned)type);
                                return -EINVAL;
                        }
                        man->io_offset = rdev->mc.agp_base;
                        man->io_size = rdev->mc.gtt_size;
                        man->io_addr = NULL;
                        if (!rdev->ddev->agp->cant_use_aperture)
                                man->flags = TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
                                             TTM_MEMTYPE_FLAG_MAPPABLE;
                        man->available_caching = TTM_PL_FLAG_UNCACHED |
                                                 TTM_PL_FLAG_WC;
                        man->default_caching = TTM_PL_FLAG_WC;
                } else
#endif
                {
                        man->io_offset = 0;
                        man->io_size = 0;
                        man->io_addr = NULL;
                }
                break;
        case TTM_PL_VRAM:
                /* "On-card" video ram */
                man->gpu_offset = rdev->mc.vram_location;
                man->flags = TTM_MEMTYPE_FLAG_FIXED |
                             TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
                             TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
                man->default_caching = TTM_PL_FLAG_WC;
                man->io_addr = NULL;
                man->io_offset = rdev->mc.aper_base;
                man->io_size = rdev->mc.aper_size;
                break;
        default:
                DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
                return -EINVAL;
        }
        return 0;
}

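/*
 * On eviction, push VRAM buffers out to GTT and everything else to plain
 * system (CPU) memory.
 */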
static void radeon_evict_flags(struct ttm_buffer_object *bo,
                                struct ttm_placement *placement)
{
        struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
        switch (bo->mem.mem_type) {
        case TTM_PL_VRAM:
                radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
                break;
        case TTM_PL_TT:
        default:
                radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
        }
}

static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
        return 0;
}

static void radeon_move_null(struct ttm_buffer_object *bo,
                             struct ttm_mem_reg *new_mem)
{
        struct ttm_mem_reg *old_mem = &bo->mem;

        BUG_ON(old_mem->mm_node != NULL);
        *old_mem = *new_mem;
        new_mem->mm_node = NULL;
}

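/*
 * Blit a buffer between placements with the GPU: translate both mm_node
 * offsets into GPU addresses (relative to the VRAM or GTT base), run
 * radeon_copy() on the CP, and fence the move so TTM can release the old
 * memory once the copy has completed.
 */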
static int radeon_move_blit(struct ttm_buffer_object *bo,
                            bool evict, int no_wait,
                            struct ttm_mem_reg *new_mem,
                            struct ttm_mem_reg *old_mem)
{
        struct radeon_device *rdev;
        uint64_t old_start, new_start;
        struct radeon_fence *fence;
        int r;

        rdev = radeon_get_rdev(bo->bdev);
        r = radeon_fence_create(rdev, &fence);
        if (unlikely(r)) {
                return r;
        }
        old_start = old_mem->mm_node->start << PAGE_SHIFT;
        new_start = new_mem->mm_node->start << PAGE_SHIFT;

        switch (old_mem->mem_type) {
        case TTM_PL_VRAM:
                old_start += rdev->mc.vram_location;
                break;
        case TTM_PL_TT:
                old_start += rdev->mc.gtt_location;
                break;
        default:
                DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
                return -EINVAL;
        }
        switch (new_mem->mem_type) {
        case TTM_PL_VRAM:
                new_start += rdev->mc.vram_location;
                break;
        case TTM_PL_TT:
                new_start += rdev->mc.gtt_location;
                break;
        default:
                DRM_ERROR("Unknown placement %d\n", new_mem->mem_type);
                return -EINVAL;
        }
        if (!rdev->cp.ready) {
                DRM_ERROR("Trying to move memory with CP turned off.\n");
                return -EINVAL;
        }
        r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence);
        /* FIXME: handle copy error */
        r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
                                      evict, no_wait, new_mem);
        radeon_fence_unref(&fence);
        return r;
}

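/*
 * VRAM -> system move done in two hops: grab a temporary GTT placement,
 * blit the data from VRAM into it with the GPU, then let
 * ttm_bo_move_ttm() unbind the pages into the final system placement.
 */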
static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
                                bool evict, bool interruptible, bool no_wait,
                                struct ttm_mem_reg *new_mem)
{
        struct radeon_device *rdev;
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct ttm_mem_reg tmp_mem;
        u32 placements;
        struct ttm_placement placement;
        int r;

        rdev = radeon_get_rdev(bo->bdev);
        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
        placement.fpfn = 0;
        placement.lpfn = 0;
        placement.num_placement = 1;
        placement.placement = &placements;
        placement.num_busy_placement = 1;
        placement.busy_placement = &placements;
        placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
        r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
                             interruptible, no_wait);
        if (unlikely(r)) {
                return r;
        }

        r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
        if (unlikely(r)) {
                goto out_cleanup;
        }

        r = ttm_tt_bind(bo->ttm, &tmp_mem);
        if (unlikely(r)) {
                goto out_cleanup;
        }
        r = radeon_move_blit(bo, true, no_wait, &tmp_mem, old_mem);
        if (unlikely(r)) {
                goto out_cleanup;
        }
        r = ttm_bo_move_ttm(bo, true, no_wait, new_mem);
out_cleanup:
        if (tmp_mem.mm_node) {
                struct ttm_bo_global *glob = rdev->mman.bdev.glob;

                spin_lock(&glob->lru_lock);
                drm_mm_put_block(tmp_mem.mm_node);
                spin_unlock(&glob->lru_lock);
                return r;
        }
        return r;
}

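/*
 * System -> VRAM is the mirror image: bind the pages into a temporary
 * GTT placement first, then blit them into VRAM with the GPU.
 */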
static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
                                bool evict, bool interruptible, bool no_wait,
                                struct ttm_mem_reg *new_mem)
{
        struct radeon_device *rdev;
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct ttm_mem_reg tmp_mem;
        struct ttm_placement placement;
        u32 placements;
        int r;

        rdev = radeon_get_rdev(bo->bdev);
        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
        placement.fpfn = 0;
        placement.lpfn = 0;
        placement.num_placement = 1;
        placement.placement = &placements;
        placement.num_busy_placement = 1;
        placement.busy_placement = &placements;
        placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
        r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait);
        if (unlikely(r)) {
                return r;
        }
        r = ttm_bo_move_ttm(bo, true, no_wait, &tmp_mem);
        if (unlikely(r)) {
                goto out_cleanup;
        }
        r = radeon_move_blit(bo, true, no_wait, new_mem, old_mem);
        if (unlikely(r)) {
                goto out_cleanup;
        }
out_cleanup:
        if (tmp_mem.mm_node) {
                struct ttm_bo_global *glob = rdev->mman.bdev.glob;

                spin_lock(&glob->lru_lock);
                drm_mm_put_block(tmp_mem.mm_node);
                spin_unlock(&glob->lru_lock);
                return r;
        }
        return r;
}

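/*
 * Top-level move callback: fast-path system<->TT moves that only need a
 * (re)bind, fall back to ttm_bo_move_memcpy() when the CP or the asic
 * copy hook is unavailable, and otherwise route through the two-hop
 * helpers above or a direct GPU blit.
 */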
static int radeon_bo_move(struct ttm_buffer_object *bo,
                          bool evict, bool interruptible, bool no_wait,
                          struct ttm_mem_reg *new_mem)
{
        struct radeon_device *rdev;
        struct ttm_mem_reg *old_mem = &bo->mem;
        int r;

        rdev = radeon_get_rdev(bo->bdev);
        if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
                radeon_move_null(bo, new_mem);
                return 0;
        }
        if ((old_mem->mem_type == TTM_PL_TT &&
             new_mem->mem_type == TTM_PL_SYSTEM) ||
            (old_mem->mem_type == TTM_PL_SYSTEM &&
             new_mem->mem_type == TTM_PL_TT)) {
                /* bind is enough */
                radeon_move_null(bo, new_mem);
                return 0;
        }
        if (!rdev->cp.ready || rdev->asic->copy == NULL) {
                /* use memcpy */
                goto memcpy;
        }

        if (old_mem->mem_type == TTM_PL_VRAM &&
            new_mem->mem_type == TTM_PL_SYSTEM) {
                r = radeon_move_vram_ram(bo, evict, interruptible,
                                         no_wait, new_mem);
        } else if (old_mem->mem_type == TTM_PL_SYSTEM &&
                   new_mem->mem_type == TTM_PL_VRAM) {
                r = radeon_move_ram_vram(bo, evict, interruptible,
                                         no_wait, new_mem);
        } else {
                r = radeon_move_blit(bo, evict, no_wait, new_mem, old_mem);
        }

        if (r) {
memcpy:
                r = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
        }

        return r;
}

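/*
 * TTM sync object hooks: TTM deals in opaque sync objects, so these thin
 * wrappers cast back and forth to struct radeon_fence and delegate to the
 * radeon fence code.
 */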
static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg,
                                bool lazy, bool interruptible)
{
        return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible);
}

static int radeon_sync_obj_flush(void *sync_obj, void *sync_arg)
{
        return 0;
}

static void radeon_sync_obj_unref(void **sync_obj)
{
        radeon_fence_unref((struct radeon_fence **)sync_obj);
}

static void *radeon_sync_obj_ref(void *sync_obj)
{
        return radeon_fence_ref((struct radeon_fence *)sync_obj);
}

static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg)
{
        return radeon_fence_signaled((struct radeon_fence *)sync_obj);
}

static struct ttm_bo_driver radeon_bo_driver = {
        .create_ttm_backend_entry = &radeon_create_ttm_backend_entry,
        .invalidate_caches = &radeon_invalidate_caches,
        .init_mem_type = &radeon_init_mem_type,
        .evict_flags = &radeon_evict_flags,
        .move = &radeon_bo_move,
        .verify_access = &radeon_verify_access,
        .sync_obj_signaled = &radeon_sync_obj_signaled,
        .sync_obj_wait = &radeon_sync_obj_wait,
        .sync_obj_flush = &radeon_sync_obj_flush,
        .sync_obj_unref = &radeon_sync_obj_unref,
        .sync_obj_ref = &radeon_sync_obj_ref,
        .move_notify = &radeon_bo_move_notify,
        .fault_reserve_notify = &radeon_bo_fault_reserve_notify,
};

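/*
 * Bring up TTM for this device: take the global references, initialize
 * the bo_device, create the VRAM and GTT managers sized from the memory
 * controller, pin a 256KB VRAM buffer covering the stolen VGA memory and
 * register the debugfs entries.
 */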
int radeon_ttm_init(struct radeon_device *rdev)
{
        int r;

        r = radeon_ttm_global_init(rdev);
        if (r) {
                return r;
        }
        /* No other users of the address space, so set it to 0 */
        r = ttm_bo_device_init(&rdev->mman.bdev,
                               rdev->mman.bo_global_ref.ref.object,
                               &radeon_bo_driver, DRM_FILE_PAGE_OFFSET,
                               rdev->need_dma32);
        if (r) {
                DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
                return r;
        }
        r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
                           rdev->mc.real_vram_size >> PAGE_SHIFT);
        if (r) {
                DRM_ERROR("Failed initializing VRAM heap.\n");
                return r;
        }
        r = radeon_bo_create(rdev, NULL, 256 * 1024, true,
                             RADEON_GEM_DOMAIN_VRAM,
                             &rdev->stollen_vga_memory);
        if (r) {
                return r;
        }
        r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
        if (r)
                return r;
        r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
        radeon_bo_unreserve(rdev->stollen_vga_memory);
        if (r) {
                radeon_bo_unref(&rdev->stollen_vga_memory);
                return r;
        }
        DRM_INFO("radeon: %uM of VRAM memory ready\n",
                 (unsigned)(rdev->mc.real_vram_size / (1024 * 1024)));
        r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
                           rdev->mc.gtt_size >> PAGE_SHIFT);
        if (r) {
                DRM_ERROR("Failed initializing GTT heap.\n");
                return r;
        }
        DRM_INFO("radeon: %uM of GTT memory ready.\n",
                 (unsigned)(rdev->mc.gtt_size / (1024 * 1024)));
        if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
                rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
        }

        r = radeon_ttm_debugfs_init(rdev);
        if (r) {
                DRM_ERROR("Failed to init debugfs\n");
                return r;
        }
        return 0;
}

void radeon_ttm_fini(struct radeon_device *rdev)
{
        int r;

        if (rdev->stollen_vga_memory) {
                r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
                if (r == 0) {
                        radeon_bo_unpin(rdev->stollen_vga_memory);
                        radeon_bo_unreserve(rdev->stollen_vga_memory);
                }
                radeon_bo_unref(&rdev->stollen_vga_memory);
        }
        ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
        ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
        ttm_bo_device_release(&rdev->mman.bdev);
        radeon_gart_fini(rdev);
        radeon_ttm_global_fini(rdev);
        DRM_INFO("radeon: ttm finalized\n");
}

static struct vm_operations_struct radeon_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops = NULL;

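/*
 * Fault handler installed over TTM's own vm_ops: bail out early for
 * mappings with no buffer object attached, otherwise hand the fault
 * straight to TTM's fault handler.
 */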
static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct ttm_buffer_object *bo;
        int r;

        bo = (struct ttm_buffer_object *)vma->vm_private_data;
        if (bo == NULL) {
                return VM_FAULT_NOPAGE;
        }
        r = ttm_vm_ops->fault(vma, vmf);
        return r;
}

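/*
 * mmap entry point: offsets below DRM_FILE_PAGE_OFFSET still go through
 * the legacy drm_mmap() path, everything else is a TTM object. On the
 * first TTM mapping, copy TTM's vm_ops and patch in radeon_ttm_fault()
 * so our wrapper runs on every fault.
 */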
int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *file_priv;
        struct radeon_device *rdev;
        int r;

        if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
                return drm_mmap(filp, vma);
        }

        file_priv = (struct drm_file *)filp->private_data;
        rdev = file_priv->minor->dev->dev_private;
        if (rdev == NULL) {
                return -EINVAL;
        }
        r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
        if (unlikely(r != 0)) {
                return r;
        }
        if (unlikely(ttm_vm_ops == NULL)) {
                ttm_vm_ops = vma->vm_ops;
                radeon_ttm_vm_ops = *ttm_vm_ops;
                radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
        }
        vma->vm_ops = &radeon_ttm_vm_ops;
        return 0;
}


/*
 * TTM backend functions.
 */
struct radeon_ttm_backend {
        struct ttm_backend              backend;
        struct radeon_device            *rdev;
        unsigned long                   num_pages;
        struct page                     **pages;
        struct page                     *dummy_read_page;
        bool                            populated;
        bool                            bound;
        unsigned                        offset;
};

static int radeon_ttm_backend_populate(struct ttm_backend *backend,
                                       unsigned long num_pages,
                                       struct page **pages,
                                       struct page *dummy_read_page)
{
        struct radeon_ttm_backend *gtt;

        gtt = container_of(backend, struct radeon_ttm_backend, backend);
        gtt->pages = pages;
        gtt->num_pages = num_pages;
        gtt->dummy_read_page = dummy_read_page;
        gtt->populated = true;
        return 0;
}

static void radeon_ttm_backend_clear(struct ttm_backend *backend)
{
        struct radeon_ttm_backend *gtt;

        gtt = container_of(backend, struct radeon_ttm_backend, backend);
        gtt->pages = NULL;
        gtt->num_pages = 0;
        gtt->dummy_read_page = NULL;
        gtt->populated = false;
        gtt->bound = false;
}


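/*
 * Bind the previously populated pages into the GART at the offset TTM
 * picked for this buffer (mm_node start, converted to bytes).
 */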
static int radeon_ttm_backend_bind(struct ttm_backend *backend,
                                   struct ttm_mem_reg *bo_mem)
{
        struct radeon_ttm_backend *gtt;
        int r;

        gtt = container_of(backend, struct radeon_ttm_backend, backend);
        gtt->offset = bo_mem->mm_node->start << PAGE_SHIFT;
        if (!gtt->num_pages) {
                WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", gtt->num_pages, bo_mem, backend);
        }
        r = radeon_gart_bind(gtt->rdev, gtt->offset,
                             gtt->num_pages, gtt->pages);
        if (r) {
                DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
                          gtt->num_pages, gtt->offset);
                return r;
        }
        gtt->bound = true;
        return 0;
}

static int radeon_ttm_backend_unbind(struct ttm_backend *backend)
{
        struct radeon_ttm_backend *gtt;

        gtt = container_of(backend, struct radeon_ttm_backend, backend);
        radeon_gart_unbind(gtt->rdev, gtt->offset, gtt->num_pages);
        gtt->bound = false;
        return 0;
}

static void radeon_ttm_backend_destroy(struct ttm_backend *backend)
{
        struct radeon_ttm_backend *gtt;

        gtt = container_of(backend, struct radeon_ttm_backend, backend);
        if (gtt->bound) {
                radeon_ttm_backend_unbind(backend);
        }
        kfree(gtt);
}

static struct ttm_backend_func radeon_backend_func = {
        .populate = &radeon_ttm_backend_populate,
        .clear = &radeon_ttm_backend_clear,
        .bind = &radeon_ttm_backend_bind,
        .unbind = &radeon_ttm_backend_unbind,
        .destroy = &radeon_ttm_backend_destroy,
};

struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev)
{
        struct radeon_ttm_backend *gtt;

        gtt = kzalloc(sizeof(struct radeon_ttm_backend), GFP_KERNEL);
        if (gtt == NULL) {
                return NULL;
        }
        gtt->backend.bdev = &rdev->mman.bdev;
        gtt->backend.flags = 0;
        gtt->backend.func = &radeon_backend_func;
        gtt->rdev = rdev;
        gtt->pages = NULL;
        gtt->num_pages = 0;
        gtt->dummy_read_page = NULL;
        gtt->populated = false;
        gtt->bound = false;
        return &gtt->backend;
}

#define RADEON_DEBUGFS_MEM_TYPES 2

#if defined(CONFIG_DEBUG_FS)
static int radeon_mm_dump_table(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        struct drm_mm *mm = (struct drm_mm *)node->info_ent->data;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;
        int ret;
        struct ttm_bo_global *glob = rdev->mman.bdev.glob;

        spin_lock(&glob->lru_lock);
        ret = drm_mm_dump_table(m, mm);
        spin_unlock(&glob->lru_lock);
        return ret;
}
#endif

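/*
 * Register two debugfs files, radeon_vram_mm and radeon_gtt_mm, that dump
 * the drm_mm allocator state of the VRAM and GTT managers. Compiles to a
 * no-op when CONFIG_DEBUG_FS is disabled.
 */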
static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
        static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES];
        static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES][32];
        unsigned i;

        for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
                if (i == 0)
                        sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
                else
                        sprintf(radeon_mem_types_names[i], "radeon_gtt_mm");
                radeon_mem_types_list[i].name = radeon_mem_types_names[i];
                radeon_mem_types_list[i].show = &radeon_mm_dump_table;
                radeon_mem_types_list[i].driver_features = 0;
                if (i == 0)
                        radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_VRAM].manager;
                else
                        radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].manager;

        }
        return radeon_debugfs_add_files(rdev, radeon_mem_types_list, RADEON_DEBUGFS_MEM_TYPES);

#endif
        return 0;
}