/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *          Ben Skeggs   <darktama@iinet.net.au>
 *          Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include "drmP.h"

#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"

#include <linux/log2.h>
#include <linux/slab.h>

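/*
 * TTM destroy callback, passed to ttm_bo_init() in nouveau_bo_new();
 * called once the last reference to the buffer object is dropped.
 */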
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	ttm_bo_kunmap(&nvbo->kmap);

	if (unlikely(nvbo->gem))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);

	if (nvbo->tile)
		nv10_mem_expire_tiling(dev, nvbo->tile, NULL);

	spin_lock(&dev_priv->ttm.bo_list_lock);
	list_del(&nvbo->head);
	spin_unlock(&dev_priv->ttm.bo_list_lock);
	kfree(nvbo);
}

static void
nouveau_bo_fixup_align(struct drm_device *dev,
		       uint32_t tile_mode, uint32_t tile_flags,
		       int *align, int *size)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/*
	 * Some of the tile_flags have a periodic structure of N*4096 bytes,
	 * align to that as well as the page size. Align the size to the
	 * appropriate boundaries. This does imply that sizes are rounded up
	 * by 3-7 pages, so be aware of this and do not waste memory by
	 * allocating many small buffers.
	 */
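	/*
	 * Worked example (assuming a hypothetical 256 MiB framebuffer):
	 * block_size is 256 MiB >> 15 = 8192, a power of two, so the first
	 * loop below settles on *align = 12 * 2 * 8192 = 196608 at i == 2,
	 * the first multiple of 12 * block_size that is 64 KiB-aligned.
	 */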
	if (dev_priv->card_type == NV_50) {
		uint32_t block_size = nouveau_mem_fb_amount(dev) >> 15;
		int i;

		switch (tile_flags) {
		case 0x1800:
		case 0x2800:
		case 0x4800:
		case 0x7a00:
			if (is_power_of_2(block_size)) {
				for (i = 1; i < 10; i++) {
					*align = 12 * i * block_size;
					if (!(*align % 65536))
						break;
				}
			} else {
				for (i = 1; i < 10; i++) {
					*align = 8 * i * block_size;
					if (!(*align % 65536))
						break;
				}
			}
			*size = roundup(*size, *align);
			break;
		default:
			break;
		}

	} else {
		if (tile_mode) {
			if (dev_priv->chipset >= 0x40) {
				*align = 65536;
				*size = roundup(*size, 64 * tile_mode);

			} else if (dev_priv->chipset >= 0x30) {
				*align = 32768;
				*size = roundup(*size, 64 * tile_mode);

			} else if (dev_priv->chipset >= 0x20) {
				*align = 16384;
				*size = roundup(*size, 64 * tile_mode);

			} else if (dev_priv->chipset >= 0x10) {
				*align = 16384;
				*size = roundup(*size, 32 * tile_mode);
			}
		}
	}

	/* ALIGN works only on powers of two. */
	*size = roundup(*size, PAGE_SIZE);

	if (dev_priv->card_type == NV_50) {
		*size = roundup(*size, 65536);
		*align = max(65536, *align);
	}
}

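/*
 * Allocate and initialise a new buffer object.  Alignment and size are
 * fixed up for tiling constraints first, then the object is handed to
 * TTM and tracked on the device-wide bo_list.
 */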
int
nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
	       int size, int align, uint32_t flags, uint32_t tile_mode,
	       uint32_t tile_flags, bool no_vm, bool mappable,
	       struct nouveau_bo **pnvbo)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *nvbo;
	int ret = 0;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	nvbo->mappable = mappable;
	nvbo->no_vm = no_vm;
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;

	nouveau_bo_fixup_align(dev, tile_mode, tile_flags, &align, &size);
	align >>= PAGE_SHIFT;

	nvbo->placement.fpfn = 0;
	nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0;
	nouveau_bo_placement_set(nvbo, flags);

	nvbo->channel = chan;
	ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
			  ttm_bo_type_device, &nvbo->placement, align, 0,
			  false, NULL, size, nouveau_bo_del_ttm);
	nvbo->channel = NULL;
	if (ret) {
		/* TTM will call nouveau_bo_del_ttm if it fails. */
		return ret;
	}

	spin_lock(&dev_priv->ttm.bo_list_lock);
	list_add_tail(&nvbo->head, &dev_priv->ttm.bo_list);
	spin_unlock(&dev_priv->ttm.bo_list_lock);
	*pnvbo = nvbo;
	return 0;
}

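/*
 * Rebuild the TTM placement list from a mask of TTM_PL_FLAG_* memory
 * types.  Pinned buffers keep TTM_PL_FLAG_NO_EVICT on every entry so a
 * placement change cannot make them evictable.
 */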
void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t memtype)
{
	int n = 0;

	if (memtype & TTM_PL_FLAG_VRAM)
		nvbo->placements[n++] = TTM_PL_FLAG_VRAM | TTM_PL_MASK_CACHING;
	if (memtype & TTM_PL_FLAG_TT)
		nvbo->placements[n++] = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	if (memtype & TTM_PL_FLAG_SYSTEM)
		nvbo->placements[n++] = TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING;
	nvbo->placement.placement = nvbo->placements;
	nvbo->placement.busy_placement = nvbo->placements;
	nvbo->placement.num_placement = n;
	nvbo->placement.num_busy_placement = n;

	if (nvbo->pin_refcnt) {
		while (n--)
			nvbo->placements[n] |= TTM_PL_FLAG_NO_EVICT;
	}
}

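/*
 * Pin a buffer into the given memory type.  Pinning is refcounted: only
 * the first pin validates the buffer with NO_EVICT set, and pinning an
 * already-pinned buffer into a different memory type is rejected.
 */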
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret, i;

	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(nouveau_bdev(bo->bdev)->dev,
			 "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);
		return -EINVAL;
	}

	if (nvbo->pin_refcnt++)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		goto out;

	nouveau_bo_placement_set(nvbo, memtype);
	for (i = 0; i < nvbo->placement.num_placement; i++)
		nvbo->placements[i] |= TTM_PL_FLAG_NO_EVICT;

	ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free -= bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free -= bo->mem.size;
			break;
		default:
			break;
		}
	}
	ttm_bo_unreserve(bo);
out:
	if (unlikely(ret))
		nvbo->pin_refcnt--;
	return ret;
}

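/*
 * Drop a pin reference.  The final unpin clears NO_EVICT from the
 * placements and revalidates, making the buffer evictable again.
 */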
int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret, i;

	if (--nvbo->pin_refcnt)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		return ret;

	for (i = 0; i < nvbo->placement.num_placement; i++)
		nvbo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;

	ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free += bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free += bo->mem.size;
			break;
		default:
			break;
		}
	}

	ttm_bo_unreserve(bo);
	return ret;
}

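/*
 * Map the whole buffer into kernel address space.  The mapping is kept
 * in nvbo->kmap for use by the rd/wr accessors below.
 */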
int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	ttm_bo_kunmap(&nvbo->kmap);
}

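/*
 * Width-specific accessors for a kmap'd buffer.  ttm_kmap_obj_virtual()
 * reports whether the mapping is I/O memory (e.g. write-combined VRAM),
 * in which case the io*_native() helpers must be used instead of plain
 * loads and stores.
 */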
u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread16_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

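/*
 * Create the TTM backend used for TTM_PL_TT placements: AGP when the
 * GART is driven over AGP, otherwise the SGDMA page-table code.
 */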
static struct ttm_backend *
nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (dev_priv->gart_info.type) {
#if __OS_HAS_AGP
	case NOUVEAU_GART_AGP:
		return ttm_agp_backend_init(bdev, dev->agp->bridge);
#endif
	case NOUVEAU_GART_SGDMA:
		return nouveau_sgdma_init_ttm(dev);
	default:
		NV_ERROR(dev, "Unknown GART type %d\n",
			 dev_priv->gart_info.type);
		break;
	}

	return NULL;
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}

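/*
 * Describe each memory type to TTM: which aperture backs it, which
 * caching modes it supports, and whether it needs an ioremap to be
 * CPU-accessible.
 */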
static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE |
			     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;

		man->io_addr = NULL;
		man->io_offset = drm_get_resource_start(dev, 1);
		man->io_size = drm_get_resource_len(dev, 1);
		if (man->io_size > nouveau_mem_fb_amount(dev))
			man->io_size = nouveau_mem_fb_amount(dev);

		man->gpu_offset = dev_priv->vm_vram_base;
		break;
	case TTM_PL_TT:
		switch (dev_priv->gart_info.type) {
		case NOUVEAU_GART_AGP:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
			man->available_caching = TTM_PL_FLAG_UNCACHED;
			man->default_caching = TTM_PL_FLAG_UNCACHED;
			break;
		case NOUVEAU_GART_SGDMA:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
			break;
		default:
			NV_ERROR(dev, "Unknown GART type: %d\n",
				 dev_priv->gart_info.type);
			return -EINVAL;
		}

		man->io_offset  = dev_priv->gart_info.aper_base;
		man->io_size    = dev_priv->gart_info.aper_size;
		man->io_addr    = NULL;
		man->gpu_offset = dev_priv->vm_gart_base;
		break;
	default:
		NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

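/*
 * On eviction, prefer moving VRAM contents to GART over plain system
 * memory, so the buffer remains visible to the GPU.
 */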
static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT |
					 TTM_PL_FLAG_SYSTEM);
		break;
	default:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM);
		break;
	}

	*pl = nvbo->placement;
}

/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */

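/*
 * Emit a fence on the copy channel and hand it to TTM so the memory is
 * not reused before the blit completes.  If the buffer belongs to a
 * different channel than the one doing the copy, we also wait on the
 * CPU, as there is no cross-channel synchronisation here.
 */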
static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
			      struct nouveau_bo *nvbo, bool evict, bool no_wait,
			      struct ttm_mem_reg *new_mem)
{
	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(chan, &fence, true);
	if (ret)
		return ret;

	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
					evict, no_wait, new_mem);
	if (nvbo->channel && nvbo->channel != chan)
		ret = nouveau_fence_wait(fence, NULL, false, false);
	nouveau_fence_unref((void *)&fence);
	return ret;
}

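/*
 * Pick the DMA object that makes the given memory visible to a channel:
 * the kernel's own channel uses the global NvDmaVRAM/NvDmaGART objects,
 * user channels use their per-channel vram/gart handles.
 */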
static inline uint32_t
nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
		      struct ttm_mem_reg *mem)
{
	if (chan == nouveau_bdev(nvbo->bo.bdev)->channel) {
		if (mem->mem_type == TTM_PL_TT)
			return NvDmaGART;
		return NvDmaVRAM;
	}

	if (mem->mem_type == TTM_PL_TT)
		return chan->gart_handle;
	return chan->vram_handle;
}

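/*
 * Copy a buffer between placements with the M2MF engine, one PAGE_SIZE
 * line per page, then fence the result via the cleanup helper above.
 */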
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     int no_wait, struct ttm_mem_reg *new_mem)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_channel *chan;
	uint64_t src_offset, dst_offset;
	uint32_t page_count;
	int ret;

	chan = nvbo->channel;
	if (!chan || nvbo->tile_flags || nvbo->no_vm)
		chan = dev_priv->channel;

	src_offset = old_mem->mm_node->start << PAGE_SHIFT;
	dst_offset = new_mem->mm_node->start << PAGE_SHIFT;
	if (chan != dev_priv->channel) {
		if (old_mem->mem_type == TTM_PL_TT)
			src_offset += dev_priv->vm_gart_base;
		else
			src_offset += dev_priv->vm_vram_base;

		if (new_mem->mem_type == TTM_PL_TT)
			dst_offset += dev_priv->vm_gart_base;
		else
			dst_offset += dev_priv->vm_vram_base;
	}

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;
	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, old_mem));
	OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, new_mem));

	if (dev_priv->card_type >= NV_50) {
		ret = RING_SPACE(chan, 4);
		if (ret)
			return ret;
		BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
		OUT_RING(chan, 1);
		BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
		OUT_RING(chan, 1);
	}

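	/*
	 * The copy is chunked: each submission transfers at most 2047
	 * lines of PAGE_SIZE bytes (the cap used below), advancing the
	 * source and destination offsets between submissions.
	 */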
	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		if (dev_priv->card_type >= NV_50) {
			ret = RING_SPACE(chan, 3);
			if (ret)
				return ret;
			BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
			OUT_RING(chan, upper_32_bits(src_offset));
			OUT_RING(chan, upper_32_bits(dst_offset));
		}
		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;
		BEGIN_RING(chan, NvSubM2MF,
			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING(chan, lower_32_bits(src_offset));
		OUT_RING(chan, lower_32_bits(dst_offset));
		OUT_RING(chan, PAGE_SIZE); /* src_pitch */
		OUT_RING(chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING(chan, PAGE_SIZE); /* line_length */
		OUT_RING(chan, line_count);
		OUT_RING(chan, (1 << 8) | (1 << 0));
		OUT_RING(chan, 0);
		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING(chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait,
					     new_mem);
}

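/*
 * VRAM -> system moves can't be done by M2MF directly, so "flipd" first
 * blits into a temporary GART placement and lets ttm_bo_move_ttm() do
 * the final unbind into system pages.
 */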
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait, struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait, &tmp_mem);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem);
out:
	if (tmp_mem.mm_node) {
		spin_lock(&bo->bdev->glob->lru_lock);
		drm_mm_put_block(tmp_mem.mm_node);
		spin_unlock(&bo->bdev->glob->lru_lock);
	}

	return ret;
}

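/*
 * The mirror image of the above: system -> VRAM moves first bind the
 * pages into a temporary GART placement, then blit into VRAM with M2MF.
 */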
static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait, struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, evict, no_wait, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);

out:
	if (tmp_mem.mm_node) {
		spin_lock(&bo->bdev->glob->lru_lock);
		drm_mm_put_block(tmp_mem.mm_node);
		spin_unlock(&bo->bdev->glob->lru_lock);
	}

	return ret;
}

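/*
 * Set up tiling/VM state for a buffer entering VRAM: NV50 binds the
 * range into the linear VRAM page tables, NV1x-NV4x claim a tile region.
 */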
static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_tile_reg **new_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	uint64_t offset;
	int ret;

	if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
		/* Nothing to do. */
		*new_tile = NULL;
		return 0;
	}

	offset = new_mem->mm_node->start << PAGE_SHIFT;

	if (dev_priv->card_type == NV_50) {
		ret = nv50_mem_vm_bind_linear(dev,
					      offset + dev_priv->vm_vram_base,
					      new_mem->size, nvbo->tile_flags,
					      offset);
		if (ret)
			return ret;

	} else if (dev_priv->card_type >= NV_10) {
		*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
						nvbo->tile_mode);
	}

	return 0;
}

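/*
 * Retire the previous tile region once the move completes (keyed on the
 * bo's sync object) and remember the new one.
 */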
static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_tile_reg *new_tile,
		      struct nouveau_tile_reg **old_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;

	if (dev_priv->card_type >= NV_10 &&
	    dev_priv->card_type < NV_50) {
		if (*old_tile)
			nv10_mem_expire_tiling(dev, *old_tile, bo->sync_obj);

		*old_tile = new_tile;
	}
}

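/*
 * Top-level TTM move hook.  Tiling state is bound up front, then the
 * cheapest usable path is tried: a memcpy before the card is running, a
 * zero-cost struct copy for unpopulated system buffers, the M2MF engine
 * otherwise, with memcpy as the final fallback.
 */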
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait, struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_tile_reg *new_tile = NULL;
	int ret = 0;

	ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
	if (ret)
		return ret;

	/* Software copy if the card isn't up and running yet. */
	if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE ||
	    !dev_priv->channel) {
		ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
		goto out;
	}

	/* Fake bo copy. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_mem;
		new_mem->mm_node = NULL;
		goto out;
	}

	/* Hardware-assisted copy. */
	if (new_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem);
	else if (old_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem);
	else
		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);

	if (!ret)
		goto out;

	/* Fall back to software copy. */
	ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);

out:
	if (ret)
		nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
	else
		nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);

	return ret;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}

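/*
 * The TTM driver vtable: placement, moves and fencing all route through
 * the helpers above and the nouveau fence implementation.
 */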
struct ttm_bo_driver nouveau_bo_driver = {
	.create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.sync_obj_signaled = nouveau_fence_signalled,
	.sync_obj_wait = nouveau_fence_wait,
	.sync_obj_flush = nouveau_fence_flush,
	.sync_obj_unref = nouveau_fence_unref,
	.sync_obj_ref = nouveau_fence_ref,
};
774