445f27efe677f0c7de0ae481fa11423d562a3b3b
[linux-2.6.git] / drivers / gpu / drm / i915 / intel_ringbuffer.c
1 /*
2  * Copyright © 2008-2010 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Zou Nan hai <nanhai.zou@intel.com>
26  *    Xiang Hai hao<haihao.xiang@intel.com>
27  *
28  */
29
30 #include "drmP.h"
31 #include "drm.h"
32 #include "i915_drv.h"
33 #include "i915_drm.h"
34 #include "i915_trace.h"
35 #include "intel_drv.h"
36
37 static inline int ring_space(struct intel_ring_buffer *ring)
38 {
39         int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
40         if (space < 0)
41                 space += ring->size;
42         return space;
43 }
44
45 static u32 i915_gem_get_seqno(struct drm_device *dev)
46 {
47         drm_i915_private_t *dev_priv = dev->dev_private;
48         u32 seqno;
49
50         seqno = dev_priv->next_seqno;
51
52         /* reserve 0 for non-seqno */
53         if (++dev_priv->next_seqno == 0)
54                 dev_priv->next_seqno = 1;
55
56         return seqno;
57 }
58
/*
 * Emit a flush on the render ring.  The GEM invalidate/flush domain
 * masks are translated into MI_FLUSH control bits; if no GPU domain
 * is involved the call is a no-op.  Returns 0 or the error from
 * reserving ring space.
 */
static int
render_ring_flush(struct intel_ring_buffer *ring,
		  u32	invalidate_domains,
		  u32	flush_domains)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 cmd;
	int ret;

#if WATCH_EXEC
	DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
		  invalidate_domains, flush_domains);
#endif

	trace_i915_gem_request_flush(dev, dev_priv->next_seqno,
				     invalidate_domains, flush_domains);

	if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
		/*
		 * read/write caches:
		 *
		 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
		 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
		 * also flushed at 2d versus 3d pipeline switches.
		 *
		 * read-only caches:
		 *
		 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
		 * MI_READ_FLUSH is set, and is always flushed on 965.
		 *
		 * I915_GEM_DOMAIN_COMMAND may not exist?
		 *
		 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
		 * invalidated when MI_EXE_FLUSH is set.
		 *
		 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
		 * invalidated with every MI_FLUSH.
		 *
		 * TLBs:
		 *
		 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
		 * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
		 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
		 * are flushed at any MI_FLUSH.
		 */

		/* Default to flushing nothing writable; clear the bit only
		 * when the render cache itself must be written back. */
		cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
		if ((invalidate_domains|flush_domains) &
		    I915_GEM_DOMAIN_RENDER)
			cmd &= ~MI_NO_WRITE_FLUSH;
		if (INTEL_INFO(dev)->gen < 4) {
			/*
			 * On the 965, the sampler cache always gets flushed
			 * and this bit is reserved.
			 */
			if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
				cmd |= MI_READ_FLUSH;
		}
		if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
			cmd |= MI_EXE_FLUSH;

		/* G4x/Ironlake also invalidate the indirect state
		 * pointers alongside the command domain. */
		if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
		    (IS_G4X(dev) || IS_GEN5(dev)))
			cmd |= MI_INVALIDATE_ISP;

#if WATCH_EXEC
		DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
#endif
		/* Pad with MI_NOOP to keep the tail qword aligned. */
		ret = intel_ring_begin(ring, 2);
		if (ret)
			return ret;

		intel_ring_emit(ring, cmd);
		intel_ring_emit(ring, MI_NOOP);
		intel_ring_advance(ring);
	}

	return 0;
}
139
/*
 * Advance the hardware TAIL register, telling the GPU there are new
 * commands to execute up to @value.
 */
static void ring_write_tail(struct intel_ring_buffer *ring,
			    u32 value)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	I915_WRITE_TAIL(ring, value);
}
146
147 u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
148 {
149         drm_i915_private_t *dev_priv = ring->dev->dev_private;
150         u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
151                         RING_ACTHD(ring->mmio_base) : ACTHD;
152
153         return I915_READ(acthd_reg);
154 }
155
/*
 * Bring a ring up from scratch: stop it, program the start address,
 * force HEAD to zero (working around G45 reset quirks), then enable
 * it and verify the hardware actually latched the values.  Returns
 * -EIO if the ring refuses to start.  The register write order here
 * matters — do not reorder.
 */
static int init_ring_common(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj = ring->obj;
	u32 head;

	/* Stop the ring if it's running. */
	I915_WRITE_CTL(ring, 0);
	I915_WRITE_HEAD(ring, 0);
	ring->write_tail(ring, 0);

	/* Initialize the ring. */
	I915_WRITE_START(ring, obj->gtt_offset);
	head = I915_READ_HEAD(ring) & HEAD_ADDR;

	/* G45 ring initialization fails to reset head to zero */
	if (head != 0) {
		DRM_DEBUG_KMS("%s head not reset to zero "
			      "ctl %08x head %08x tail %08x start %08x\n",
			      ring->name,
			      I915_READ_CTL(ring),
			      I915_READ_HEAD(ring),
			      I915_READ_TAIL(ring),
			      I915_READ_START(ring));

		/* Retry once; if it still sticks, fall through and let
		 * the final sanity check below report the failure. */
		I915_WRITE_HEAD(ring, 0);

		if (I915_READ_HEAD(ring) & HEAD_ADDR) {
			DRM_ERROR("failed to set %s head to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  ring->name,
				  I915_READ_CTL(ring),
				  I915_READ_HEAD(ring),
				  I915_READ_TAIL(ring),
				  I915_READ_START(ring));
		}
	}

	/* Enable the ring: buffer length, 64K HEAD reporting, VALID. */
	I915_WRITE_CTL(ring,
			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
			| RING_REPORT_64K | RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
	    I915_READ_START(ring) != obj->gtt_offset ||
	    (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
		DRM_ERROR("%s initialization failed "
				"ctl %08x head %08x tail %08x start %08x\n",
				ring->name,
				I915_READ_CTL(ring),
				I915_READ_HEAD(ring),
				I915_READ_TAIL(ring),
				I915_READ_START(ring));
		return -EIO;
	}

	/* UMS keeps its own notion of ring state; KMS resyncs our
	 * software copies of head/tail from the hardware. */
	if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
		i915_kernel_lost_context(ring->dev);
	else {
		ring->head = I915_READ_HEAD(ring);
		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
		ring->space = ring_space(ring);
	}

	return 0;
}
222
/*
 * 965+ support PIPE_CONTROL commands, which provide finer grained control
 * over cache flushing.
 */
struct pipe_control {
	struct drm_i915_gem_object *obj;	/* backing scratch page (pinned) */
	volatile u32 *cpu_page;			/* CPU kmap of obj; dword 0 holds the seqno */
	u32 gtt_offset;				/* GTT address the GPU writes through */
};
232
233 static int
234 init_pipe_control(struct intel_ring_buffer *ring)
235 {
236         struct pipe_control *pc;
237         struct drm_i915_gem_object *obj;
238         int ret;
239
240         if (ring->private)
241                 return 0;
242
243         pc = kmalloc(sizeof(*pc), GFP_KERNEL);
244         if (!pc)
245                 return -ENOMEM;
246
247         obj = i915_gem_alloc_object(ring->dev, 4096);
248         if (obj == NULL) {
249                 DRM_ERROR("Failed to allocate seqno page\n");
250                 ret = -ENOMEM;
251                 goto err;
252         }
253         obj->agp_type = AGP_USER_CACHED_MEMORY;
254
255         ret = i915_gem_object_pin(obj, 4096, true);
256         if (ret)
257                 goto err_unref;
258
259         pc->gtt_offset = obj->gtt_offset;
260         pc->cpu_page =  kmap(obj->pages[0]);
261         if (pc->cpu_page == NULL)
262                 goto err_unpin;
263
264         pc->obj = obj;
265         ring->private = pc;
266         return 0;
267
268 err_unpin:
269         i915_gem_object_unpin(obj);
270 err_unref:
271         drm_gem_object_unreference(&obj->base);
272 err:
273         kfree(pc);
274         return ret;
275 }
276
277 static void
278 cleanup_pipe_control(struct intel_ring_buffer *ring)
279 {
280         struct pipe_control *pc = ring->private;
281         struct drm_i915_gem_object *obj;
282
283         if (!ring->private)
284                 return;
285
286         obj = pc->obj;
287         kunmap(obj->pages[0]);
288         i915_gem_object_unpin(obj);
289         drm_gem_object_unreference(&obj->base);
290
291         kfree(pc);
292         ring->private = NULL;
293 }
294
295 static int init_render_ring(struct intel_ring_buffer *ring)
296 {
297         struct drm_device *dev = ring->dev;
298         struct drm_i915_private *dev_priv = dev->dev_private;
299         int ret = init_ring_common(ring);
300
301         if (INTEL_INFO(dev)->gen > 3) {
302                 int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
303                 if (IS_GEN6(dev))
304                         mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
305                 I915_WRITE(MI_MODE, mode);
306         }
307
308         if (INTEL_INFO(dev)->gen >= 6) {
309         } else if (IS_GEN5(dev)) {
310                 ret = init_pipe_control(ring);
311                 if (ret)
312                         return ret;
313         }
314
315         return ret;
316 }
317
318 static void render_ring_cleanup(struct intel_ring_buffer *ring)
319 {
320         if (!ring->private)
321                 return;
322
323         cleanup_pipe_control(ring);
324 }
325
/*
 * Emit a semaphore-mailbox update telling one of the *other* two rings
 * that this ring has reached @seqno.  @i (0 or 1) selects which of the
 * two peers, per the table below.  Emits 3 dwords; the caller must
 * have reserved the space.
 */
static void
update_semaphore(struct intel_ring_buffer *ring, int i, u32 seqno)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int id;

	/*
	 * cs -> 1 = vcs, 0 = bcs
	 * vcs -> 1 = bcs, 0 = cs,
	 * bcs -> 1 = cs, 0 = vcs.
	 */
	/* Map (our ring index, i) onto the target ring index mod 3,
	 * implementing exactly the table above. */
	id = ring - dev_priv->ring;
	id += 2 - i;
	id %= 3;

	intel_ring_emit(ring,
			MI_SEMAPHORE_MBOX |
			MI_SEMAPHORE_REGISTER |
			MI_SEMAPHORE_UPDATE);
	intel_ring_emit(ring, seqno);
	/* Each ring exposes consecutive RING_SYNC registers; 4*i picks
	 * the slot the target ring polls for us. */
	intel_ring_emit(ring,
			RING_SYNC_0(dev_priv->ring[id].mmio_base) + 4*i);
}
350
/*
 * Gen6 breadcrumb: publish the new seqno to both sibling rings'
 * semaphore mailboxes, store it in the status page, and raise a user
 * interrupt.  On success *result receives the seqno.
 */
static int
gen6_add_request(struct intel_ring_buffer *ring,
		 u32 *result)
{
	u32 seqno;
	int ret;

	/* 10 dwords: 2 x 3-dword semaphore updates + 4-dword breadcrumb */
	ret = intel_ring_begin(ring, 10);
	if (ret)
		return ret;

	seqno = i915_gem_get_seqno(ring->dev);
	update_semaphore(ring, 0, seqno);
	update_semaphore(ring, 1, seqno);

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}
375
/*
 * Make @ring stall until @to has passed @seqno, using the gen6
 * semaphore-compare command against the mailbox slot @to updates for
 * us.  Returns 0 or the error from reserving ring space.
 */
int
intel_ring_sync(struct intel_ring_buffer *ring,
		struct intel_ring_buffer *to,
		u32 seqno)
{
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	/* Bits 17-16 select which of our per-ring sync registers to
	 * poll; intel_ring_sync_index maps (ring, to) to that slot. */
	intel_ring_emit(ring,
			MI_SEMAPHORE_MBOX |
			MI_SEMAPHORE_REGISTER |
			intel_ring_sync_index(ring, to) << 17 |
			MI_SEMAPHORE_COMPARE);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}
399
/*
 * Emit a 4-dword depth-stall PIPE_CONTROL qword write to @addr__,
 * used below to scrub the post-sync write path before PIPE_NOTIFY
 * (Ironlake qword-write incoherence workaround).
 */
#define PIPE_CONTROL_FLUSH(ring__, addr__)					\
do {									\
	intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |		\
		 PIPE_CONTROL_DEPTH_STALL | 2);				\
	intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);			\
	intel_ring_emit(ring__, 0);							\
	intel_ring_emit(ring__, 0);							\
} while (0)
408
/*
 * Ironlake breadcrumb: write the new seqno via PIPE_CONTROL into the
 * per-ring scratch page and signal completion with PIPE_NOTIFY.  On
 * success *result receives the seqno.
 */
static int
pc_render_add_request(struct intel_ring_buffer *ring,
		      u32 *result)
{
	struct drm_device *dev = ring->dev;
	u32 seqno = i915_gem_get_seqno(dev);
	struct pipe_control *pc = ring->private;
	u32 scratch_addr = pc->gtt_offset + 128;
	int ret;

	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
	 * incoherent with writes to memory, i.e. completely fubar,
	 * so we need to use PIPE_NOTIFY instead.
	 *
	 * However, we also need to workaround the qword write
	 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
	 * memory before requesting an interrupt.
	 */
	ret = intel_ring_begin(ring, 32);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, 0);
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128; /* write to separate cachelines */
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	/* Final PIPE_CONTROL repeats the seqno write and adds the
	 * interrupt-raising NOTIFY bit. */
	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
			PIPE_CONTROL_NOTIFY);
	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}
458
/*
 * Legacy render-ring breadcrumb: store the new seqno into the
 * hardware status page and raise MI_USER_INTERRUPT.  On success
 * *result receives the seqno.
 */
static int
render_ring_add_request(struct intel_ring_buffer *ring,
			u32 *result)
{
	struct drm_device *dev = ring->dev;
	u32 seqno = i915_gem_get_seqno(dev);
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}
480
481 static u32
482 ring_get_seqno(struct intel_ring_buffer *ring)
483 {
484         return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
485 }
486
487 static u32
488 pc_render_get_seqno(struct intel_ring_buffer *ring)
489 {
490         struct pipe_control *pc = ring->private;
491         return pc->cpu_page[0];
492 }
493
/* Unmask @mask in the Ironlake GT interrupt mask register; the
 * posting read flushes the write before we return. */
static void
ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->gt_irq_mask &= ~mask;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}
501
/* Mask @mask in the Ironlake GT interrupt mask register; the posting
 * read flushes the write before we return. */
static void
ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->gt_irq_mask |= mask;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}
509
/* Pre-Ironlake equivalent: unmask @mask in the legacy IMR register. */
static void
i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->irq_mask &= ~mask;
	I915_WRITE(IMR, dev_priv->irq_mask);
	POSTING_READ(IMR);
}
517
/* Pre-Ironlake equivalent: mask @mask in the legacy IMR register. */
static void
i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->irq_mask |= mask;
	I915_WRITE(IMR, dev_priv->irq_mask);
	POSTING_READ(IMR);
}
525
/*
 * Take a reference on the render ring's user interrupt, enabling it in
 * hardware on the 0 -> 1 transition.  Returns false if interrupts are
 * globally disabled for the device (caller must then poll instead).
 */
static bool
render_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	/* irq_lock serialises refcount vs. the hardware mask update. */
	spin_lock(&ring->irq_lock);
	if (ring->irq_refcount++ == 0) {
		if (HAS_PCH_SPLIT(dev))
			ironlake_enable_irq(dev_priv,
					    GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
		else
			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
	}
	spin_unlock(&ring->irq_lock);

	return true;
}
547
/*
 * Drop a reference taken by render_ring_get_irq, disabling the user
 * interrupt in hardware on the 1 -> 0 transition.
 */
static void
render_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	spin_lock(&ring->irq_lock);
	if (--ring->irq_refcount == 0) {
		if (HAS_PCH_SPLIT(dev))
			ironlake_disable_irq(dev_priv,
					     GT_USER_INTERRUPT |
					     GT_PIPE_NOTIFY);
		else
			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
	}
	spin_unlock(&ring->irq_lock);
}
565
/*
 * Point the ring's hardware-status-page register at the GTT address of
 * the page we allocated; gen6 moved the register within the ring's
 * MMIO block.
 */
void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	u32 mmio = IS_GEN6(ring->dev) ?
		RING_HWS_PGA_GEN6(ring->mmio_base) :
		RING_HWS_PGA(ring->mmio_base);
	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
	POSTING_READ(mmio);
}
575
/*
 * Flush for the BSD (video) ring: a plain MI_FLUSH, emitted only when
 * the render domain needs flushing; all other domains are no-ops here.
 */
static int
bsd_ring_flush(struct intel_ring_buffer *ring,
	       u32     invalidate_domains,
	       u32     flush_domains)
{
	int ret;

	if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
		return 0;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	/* MI_NOOP pads the tail to qword alignment. */
	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}
595
/*
 * Generic breadcrumb for non-render rings: store the new seqno into
 * the status page and raise MI_USER_INTERRUPT.  On success *result
 * receives the seqno.
 */
static int
ring_add_request(struct intel_ring_buffer *ring,
		 u32 *result)
{
	u32 seqno;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	seqno = i915_gem_get_seqno(ring->dev);

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
	*result = seqno;
	return 0;
}
619
/*
 * Reference-counted enable of a GT interrupt @flag for a non-render
 * ring (Ironlake-style GTIMR only).  Returns false when device
 * interrupts are globally disabled.
 */
static bool
ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	spin_lock(&ring->irq_lock);
	if (ring->irq_refcount++ == 0)
		ironlake_enable_irq(dev_priv, flag);
	spin_unlock(&ring->irq_lock);

	return true;
}
636
/* Reference-counted disable matching ring_get_irq. */
static void
ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	spin_lock(&ring->irq_lock);
	if (--ring->irq_refcount == 0)
		ironlake_disable_irq(dev_priv, flag);
	spin_unlock(&ring->irq_lock);
}
648
/*
 * Gen6 interrupt enable: unmask both the per-ring IMR bit (@rflag)
 * and the top-level GT bit (@gflag) on the 0 -> 1 refcount transition.
 * Returns false when device interrupts are globally disabled.
 */
static bool
gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	spin_lock(&ring->irq_lock);
	if (ring->irq_refcount++ == 0) {
		ring->irq_mask &= ~rflag;
		I915_WRITE_IMR(ring, ring->irq_mask);
		ironlake_enable_irq(dev_priv, gflag);
	}
	spin_unlock(&ring->irq_lock);

	return true;
}
668
/* Gen6 interrupt disable matching gen6_ring_get_irq. */
static void
gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	spin_lock(&ring->irq_lock);
	if (--ring->irq_refcount == 0) {
		ring->irq_mask |= rflag;
		I915_WRITE_IMR(ring, ring->irq_mask);
		ironlake_disable_irq(dev_priv, gflag);
	}
	spin_unlock(&ring->irq_lock);
}
683
/* BSD ring irq hooks: thin wrappers binding the BSD interrupt bit. */
static bool
bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
	return ring_get_irq(ring, GT_BSD_USER_INTERRUPT);
}
/* Release counterpart of bsd_ring_get_irq. */
static void
bsd_ring_put_irq(struct intel_ring_buffer *ring)
{
	ring_put_irq(ring, GT_BSD_USER_INTERRUPT);
}
694
/*
 * Dispatch a batch buffer on a non-render ring (965-style start
 * command; the batch runs from the unprivileged GTT address @offset).
 * @length is unused here — the batch is terminated by its own
 * MI_BATCH_BUFFER_END.
 */
static int
ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START | (2 << 6) |
			MI_BATCH_NON_SECURE_I965);
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}
712
/*
 * Dispatch a batch buffer on the render ring, picking the batch-start
 * command variant each chipset requires: the i830/845 4-dword
 * MI_BATCH_BUFFER with explicit end address, the gen4+ form with the
 * 965 non-secure bit, or the older 2-dword form in between.
 */
static int
render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
				u32 offset, u32 len)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1);

	if (IS_I830(dev) || IS_845G(dev)) {
		/* i830/845 take start and inclusive end addresses;
		 * len - 8 points at the final qword of the batch. */
		ret = intel_ring_begin(ring, 4);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_BATCH_BUFFER);
		intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
		intel_ring_emit(ring, offset + len - 8);
		intel_ring_emit(ring, 0);
	} else {
		ret = intel_ring_begin(ring, 2);
		if (ret)
			return ret;

		if (INTEL_INFO(dev)->gen >= 4) {
			intel_ring_emit(ring,
					MI_BATCH_BUFFER_START | (2 << 6) |
					MI_BATCH_NON_SECURE_I965);
			intel_ring_emit(ring, offset);
		} else {
			intel_ring_emit(ring,
					MI_BATCH_BUFFER_START | (2 << 6));
			intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
		}
	}
	intel_ring_advance(ring);

	return 0;
}
752
753 static void cleanup_status_page(struct intel_ring_buffer *ring)
754 {
755         drm_i915_private_t *dev_priv = ring->dev->dev_private;
756         struct drm_i915_gem_object *obj;
757
758         obj = ring->status_page.obj;
759         if (obj == NULL)
760                 return;
761
762         kunmap(obj->pages[0]);
763         i915_gem_object_unpin(obj);
764         drm_gem_object_unreference(&obj->base);
765         ring->status_page.obj = NULL;
766
767         memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
768 }
769
770 static int init_status_page(struct intel_ring_buffer *ring)
771 {
772         struct drm_device *dev = ring->dev;
773         drm_i915_private_t *dev_priv = dev->dev_private;
774         struct drm_i915_gem_object *obj;
775         int ret;
776
777         obj = i915_gem_alloc_object(dev, 4096);
778         if (obj == NULL) {
779                 DRM_ERROR("Failed to allocate status page\n");
780                 ret = -ENOMEM;
781                 goto err;
782         }
783         obj->agp_type = AGP_USER_CACHED_MEMORY;
784
785         ret = i915_gem_object_pin(obj, 4096, true);
786         if (ret != 0) {
787                 goto err_unref;
788         }
789
790         ring->status_page.gfx_addr = obj->gtt_offset;
791         ring->status_page.page_addr = kmap(obj->pages[0]);
792         if (ring->status_page.page_addr == NULL) {
793                 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
794                 goto err_unpin;
795         }
796         ring->status_page.obj = obj;
797         memset(ring->status_page.page_addr, 0, PAGE_SIZE);
798
799         intel_ring_setup_status_page(ring);
800         DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
801                         ring->name, ring->status_page.gfx_addr);
802
803         return 0;
804
805 err_unpin:
806         i915_gem_object_unpin(obj);
807 err_unref:
808         drm_gem_object_unreference(&obj->base);
809 err:
810         return ret;
811 }
812
/*
 * Common ring construction: set up list heads and locking, allocate
 * the status page (when the chipset needs one in GFX memory), allocate
 * and pin the ring object, map it write-combined through the aperture,
 * and run the ring's own init hook.  Unwinds fully via the goto chain
 * on any failure and returns a negative errno.
 */
int intel_init_ring_buffer(struct drm_device *dev,
			   struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	int ret;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	INIT_LIST_HEAD(&ring->gpu_write_list);

	spin_lock_init(&ring->irq_lock);
	/* All interrupts masked until a get_irq enables them. */
	ring->irq_mask = ~0;

	if (I915_NEED_GFX_HWS(dev)) {
		ret = init_status_page(ring);
		if (ret)
			return ret;
	}

	obj = i915_gem_alloc_object(dev, ring->size);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate ringbuffer\n");
		ret = -ENOMEM;
		goto err_hws;
	}

	ring->obj = obj;

	ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
	if (ret)
		goto err_unref;

	/* CPU mapping of the ring goes through the GTT aperture,
	 * write-combined for streaming command writes. */
	ring->map.size = ring->size;
	ring->map.offset = dev->agp->base + obj->gtt_offset;
	ring->map.type = 0;
	ring->map.flags = 0;
	ring->map.mtrr = 0;

	drm_core_ioremap_wc(&ring->map, dev);
	if (ring->map.handle == NULL) {
		DRM_ERROR("Failed to map ringbuffer.\n");
		ret = -EINVAL;
		goto err_unpin;
	}

	ring->virtual_start = ring->map.handle;
	ret = ring->init(ring);
	if (ret)
		goto err_unmap;

	/* Workaround an erratum on the i830 which causes a hang if
	 * the TAIL pointer points to within the last 2 cachelines
	 * of the buffer.
	 */
	ring->effective_size = ring->size;
	if (IS_I830(ring->dev))
		ring->effective_size -= 128;

	return 0;

err_unmap:
	drm_core_ioremapfree(&ring->map, dev);
err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
	ring->obj = NULL;
err_hws:
	cleanup_status_page(ring);
	return ret;
}
885
886 void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
887 {
888         struct drm_i915_private *dev_priv;
889         int ret;
890
891         if (ring->obj == NULL)
892                 return;
893
894         /* Disable the ring buffer. The ring must be idle at this point */
895         dev_priv = ring->dev->dev_private;
896         ret = intel_wait_ring_buffer(ring, ring->size - 8);
897         I915_WRITE_CTL(ring, 0);
898
899         drm_core_ioremapfree(&ring->map, ring->dev);
900
901         i915_gem_object_unpin(ring->obj);
902         drm_gem_object_unreference(&ring->obj->base);
903         ring->obj = NULL;
904
905         if (ring->cleanup)
906                 ring->cleanup(ring);
907
908         cleanup_status_page(ring);
909 }
910
911 static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
912 {
913         unsigned int *virt;
914         int rem = ring->size - ring->tail;
915
916         if (ring->space < rem) {
917                 int ret = intel_wait_ring_buffer(ring, rem);
918                 if (ret)
919                         return ret;
920         }
921
922         virt = (unsigned int *)(ring->virtual_start + ring->tail);
923         rem /= 8;
924         while (rem--) {
925                 *virt++ = MI_NOOP;
926                 *virt++ = MI_NOOP;
927         }
928
929         ring->tail = 0;
930         ring->space = ring_space(ring);
931
932         return 0;
933 }
934
/**
 * intel_wait_ring_buffer - wait until at least @n bytes are free in @ring
 * @ring: the ring to wait on
 * @n: number of free bytes required
 *
 * First consults the hardware-autoreported head in the status page as a
 * cheap fast path, then falls back to polling the HEAD register with a
 * 3 second timeout.
 *
 * Returns 0 on success, -EAGAIN if the GPU is wedged, -EBUSY on timeout.
 */
int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long end;
	u32 head;

	/* If the reported head position has wrapped or hasn't advanced,
	 * fallback to the slow and accurate path.
	 */
	/* NOTE(review): dword 4 of the status page is presumably the
	 * auto-reported HEAD value — confirm against the HWS layout.
	 */
	head = intel_read_status_page(ring, 4);
	if (head > ring->head) {
		ring->head = head;
		ring->space = ring_space(ring);
		if (ring->space >= n)
			return 0;
	}

	trace_i915_ring_wait_begin (dev);
	end = jiffies + 3 * HZ;	/* 3 second timeout */
	do {
		/* Slow path: read HEAD directly from the MMIO register. */
		ring->head = I915_READ_HEAD(ring);
		ring->space = ring_space(ring);
		if (ring->space >= n) {
			trace_i915_ring_wait_end(dev);
			return 0;
		}

		/* Let the DRI sarea know we are stalled on the GPU. */
		if (dev->primary->master) {
			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
		}

		msleep(1);
		/* Bail out early if the GPU has hung in the meantime. */
		if (atomic_read(&dev_priv->mm.wedged))
			return -EAGAIN;
	} while (!time_after(jiffies, end));
	trace_i915_ring_wait_end (dev);
	return -EBUSY;
}
976
977 int intel_ring_begin(struct intel_ring_buffer *ring,
978                      int num_dwords)
979 {
980         int n = 4*num_dwords;
981         int ret;
982
983         if (unlikely(ring->tail + n > ring->effective_size)) {
984                 ret = intel_wrap_ring_buffer(ring);
985                 if (unlikely(ret))
986                         return ret;
987         }
988
989         if (unlikely(ring->space < n)) {
990                 ret = intel_wait_ring_buffer(ring, n);
991                 if (unlikely(ret))
992                         return ret;
993         }
994
995         ring->space -= n;
996         return 0;
997 }
998
/* Commit the emitted commands by moving the hardware tail pointer.
 * The mask assumes ring->size is a power of two.
 */
void intel_ring_advance(struct intel_ring_buffer *ring)
{
	ring->tail &= ring->size - 1;
	ring->write_tail(ring, ring->tail);
}
1004
/* Template for the render (3D) ring; gen-specific vfuncs are overridden
 * at init time in intel_init_render_ring_buffer().
 */
static const struct intel_ring_buffer render_ring = {
	.name			= "render ring",
	.id			= RING_RENDER,
	.mmio_base		= RENDER_RING_BASE,
	.size			= 32 * PAGE_SIZE,
	.init			= init_render_ring,
	.write_tail		= ring_write_tail,
	.flush			= render_ring_flush,
	.add_request		= render_ring_add_request,
	.get_seqno		= ring_get_seqno,
	.irq_get		= render_ring_get_irq,
	.irq_put		= render_ring_put_irq,
	.dispatch_execbuffer	= render_ring_dispatch_execbuffer,
	.cleanup		= render_ring_cleanup,
};
1020
/* ring buffer for bit-stream decoder */

/* Template for the pre-gen6 BSD (video decode) ring. */
static const struct intel_ring_buffer bsd_ring = {
	.name			= "bsd ring",
	.id			= RING_BSD,
	.mmio_base		= BSD_RING_BASE,
	.size			= 32 * PAGE_SIZE,
	.init			= init_ring_common,
	.write_tail		= ring_write_tail,
	.flush			= bsd_ring_flush,
	.add_request		= ring_add_request,
	.get_seqno		= ring_get_seqno,
	.irq_get		= bsd_ring_get_irq,
	.irq_put		= bsd_ring_put_irq,
	.dispatch_execbuffer	= ring_dispatch_execbuffer,
};
1037
1038
/* Move the gen6 BSD ring tail pointer.
 *
 * The hardware requires a specific sequence around every tail write:
 * disable the RC idle message, wait for the ring to report idle, write
 * the tail, then re-enable the idle message. Do not reorder these steps.
 */
static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
				     u32 value)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;

	/* Every tail move must follow the sequence below */
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
		GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
	I915_WRITE(GEN6_BSD_RNCID, 0x0);

	/* Wait (up to 50ms) for the idle indicator before touching TAIL. */
	if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
				GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
			50))
		DRM_ERROR("timed out waiting for IDLE Indicator\n");

	I915_WRITE_TAIL(ring, value);
	/* Re-enable the RC idle message now that the tail has moved. */
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
		GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
}
1060
1061 static int gen6_ring_flush(struct intel_ring_buffer *ring,
1062                            u32 invalidate, u32 flush)
1063 {
1064         uint32_t cmd;
1065         int ret;
1066
1067         if (((invalidate | flush) & I915_GEM_GPU_DOMAINS) == 0)
1068                 return 0;
1069
1070         ret = intel_ring_begin(ring, 4);
1071         if (ret)
1072                 return ret;
1073
1074         cmd = MI_FLUSH_DW;
1075         if (invalidate & I915_GEM_GPU_DOMAINS)
1076                 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
1077         intel_ring_emit(ring, cmd);
1078         intel_ring_emit(ring, 0);
1079         intel_ring_emit(ring, 0);
1080         intel_ring_emit(ring, MI_NOOP);
1081         intel_ring_advance(ring);
1082         return 0;
1083 }
1084
1085 static int
1086 gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
1087                               u32 offset, u32 len)
1088 {
1089        int ret;
1090
1091        ret = intel_ring_begin(ring, 2);
1092        if (ret)
1093                return ret;
1094
1095        intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
1096        /* bit0-7 is the length on GEN6+ */
1097        intel_ring_emit(ring, offset);
1098        intel_ring_advance(ring);
1099
1100        return 0;
1101 }
1102
1103 static bool
1104 gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
1105 {
1106         return gen6_ring_get_irq(ring,
1107                                  GT_USER_INTERRUPT,
1108                                  GEN6_RENDER_USER_INTERRUPT);
1109 }
1110
1111 static void
1112 gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
1113 {
1114         return gen6_ring_put_irq(ring,
1115                                  GT_USER_INTERRUPT,
1116                                  GEN6_RENDER_USER_INTERRUPT);
1117 }
1118
1119 static bool
1120 gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
1121 {
1122         return gen6_ring_get_irq(ring,
1123                                  GT_GEN6_BSD_USER_INTERRUPT,
1124                                  GEN6_BSD_USER_INTERRUPT);
1125 }
1126
1127 static void
1128 gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
1129 {
1130         return gen6_ring_put_irq(ring,
1131                                  GT_GEN6_BSD_USER_INTERRUPT,
1132                                  GEN6_BSD_USER_INTERRUPT);
1133 }
1134
/* ring buffer for Video Codec for Gen6+ */
/* Note the gen6-specific tail-write workaround and MI_FLUSH_DW flush. */
static const struct intel_ring_buffer gen6_bsd_ring = {
	.name			= "gen6 bsd ring",
	.id			= RING_BSD,
	.mmio_base		= GEN6_BSD_RING_BASE,
	.size			= 32 * PAGE_SIZE,
	.init			= init_ring_common,
	.write_tail		= gen6_bsd_ring_write_tail,
	.flush			= gen6_ring_flush,
	.add_request		= gen6_add_request,
	.get_seqno		= ring_get_seqno,
	.irq_get		= gen6_bsd_ring_get_irq,
	.irq_put		= gen6_bsd_ring_put_irq,
	.dispatch_execbuffer	= gen6_ring_dispatch_execbuffer,
};
1150
1151 /* Blitter support (SandyBridge+) */
1152
1153 static bool
1154 blt_ring_get_irq(struct intel_ring_buffer *ring)
1155 {
1156         return gen6_ring_get_irq(ring,
1157                                  GT_BLT_USER_INTERRUPT,
1158                                  GEN6_BLITTER_USER_INTERRUPT);
1159 }
1160
1161 static void
1162 blt_ring_put_irq(struct intel_ring_buffer *ring)
1163 {
1164         gen6_ring_put_irq(ring,
1165                           GT_BLT_USER_INTERRUPT,
1166                           GEN6_BLITTER_USER_INTERRUPT);
1167 }
1168
1169
/* Workaround for some stepping of SNB,
 * each time when BLT engine ring tail moved,
 * the first command in the ring to be parsed
 * should be MI_BATCH_BUFFER_START
 */
/* Fix: parenthesize the macro argument so expressions passed as @dev
 * (e.g. a conditional) expand correctly in the member access.
 */
#define NEED_BLT_WORKAROUND(dev) \
	(IS_GEN6(dev) && ((dev)->pdev->revision < 8))
1177
/* Return the pinned workaround batch object stashed in ring->private,
 * or NULL when the SNB BLT workaround is not in use.
 */
static inline struct drm_i915_gem_object *
to_blt_workaround(struct intel_ring_buffer *ring)
{
	return ring->private;
}
1183
1184 static int blt_ring_init(struct intel_ring_buffer *ring)
1185 {
1186         if (NEED_BLT_WORKAROUND(ring->dev)) {
1187                 struct drm_i915_gem_object *obj;
1188                 u32 *ptr;
1189                 int ret;
1190
1191                 obj = i915_gem_alloc_object(ring->dev, 4096);
1192                 if (obj == NULL)
1193                         return -ENOMEM;
1194
1195                 ret = i915_gem_object_pin(obj, 4096, true);
1196                 if (ret) {
1197                         drm_gem_object_unreference(&obj->base);
1198                         return ret;
1199                 }
1200
1201                 ptr = kmap(obj->pages[0]);
1202                 *ptr++ = MI_BATCH_BUFFER_END;
1203                 *ptr++ = MI_NOOP;
1204                 kunmap(obj->pages[0]);
1205
1206                 ret = i915_gem_object_set_to_gtt_domain(obj, false);
1207                 if (ret) {
1208                         i915_gem_object_unpin(obj);
1209                         drm_gem_object_unreference(&obj->base);
1210                         return ret;
1211                 }
1212
1213                 ring->private = obj;
1214         }
1215
1216         return init_ring_common(ring);
1217 }
1218
1219 static int blt_ring_begin(struct intel_ring_buffer *ring,
1220                           int num_dwords)
1221 {
1222         if (ring->private) {
1223                 int ret = intel_ring_begin(ring, num_dwords+2);
1224                 if (ret)
1225                         return ret;
1226
1227                 intel_ring_emit(ring, MI_BATCH_BUFFER_START);
1228                 intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset);
1229
1230                 return 0;
1231         } else
1232                 return intel_ring_begin(ring, 4);
1233 }
1234
1235 static int blt_ring_flush(struct intel_ring_buffer *ring,
1236                           u32 invalidate, u32 flush)
1237 {
1238         uint32_t cmd;
1239         int ret;
1240
1241         if (((invalidate | flush) & I915_GEM_DOMAIN_RENDER) == 0)
1242                 return 0;
1243
1244         ret = blt_ring_begin(ring, 4);
1245         if (ret)
1246                 return ret;
1247
1248         cmd = MI_FLUSH_DW;
1249         if (invalidate & I915_GEM_DOMAIN_RENDER)
1250                 cmd |= MI_INVALIDATE_TLB;
1251         intel_ring_emit(ring, cmd);
1252         intel_ring_emit(ring, 0);
1253         intel_ring_emit(ring, 0);
1254         intel_ring_emit(ring, MI_NOOP);
1255         intel_ring_advance(ring);
1256         return 0;
1257 }
1258
1259 static void blt_ring_cleanup(struct intel_ring_buffer *ring)
1260 {
1261         if (!ring->private)
1262                 return;
1263
1264         i915_gem_object_unpin(ring->private);
1265         drm_gem_object_unreference(ring->private);
1266         ring->private = NULL;
1267 }
1268
/* Template for the SandyBridge+ blitter ring; blt_ring_init() installs
 * the SNB tail-move workaround when needed.
 */
static const struct intel_ring_buffer gen6_blt_ring = {
	.name			= "blt ring",
	.id			= RING_BLT,
	.mmio_base		= BLT_RING_BASE,
	.size			= 32 * PAGE_SIZE,
	.init			= blt_ring_init,
	.write_tail		= ring_write_tail,
	.flush			= blt_ring_flush,
	.add_request		= gen6_add_request,
	.get_seqno		= ring_get_seqno,
	.irq_get		= blt_ring_get_irq,
	.irq_put		= blt_ring_put_irq,
	.dispatch_execbuffer	= gen6_ring_dispatch_execbuffer,
	.cleanup		= blt_ring_cleanup,
};
1284
1285 int intel_init_render_ring_buffer(struct drm_device *dev)
1286 {
1287         drm_i915_private_t *dev_priv = dev->dev_private;
1288         struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
1289
1290         *ring = render_ring;
1291         if (INTEL_INFO(dev)->gen >= 6) {
1292                 ring->add_request = gen6_add_request;
1293                 ring->irq_get = gen6_render_ring_get_irq;
1294                 ring->irq_put = gen6_render_ring_put_irq;
1295         } else if (IS_GEN5(dev)) {
1296                 ring->add_request = pc_render_add_request;
1297                 ring->get_seqno = pc_render_get_seqno;
1298         }
1299
1300         if (!I915_NEED_GFX_HWS(dev)) {
1301                 ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1302                 memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1303         }
1304
1305         return intel_init_ring_buffer(dev, ring);
1306 }
1307
1308 int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
1309 {
1310         drm_i915_private_t *dev_priv = dev->dev_private;
1311         struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
1312
1313         *ring = render_ring;
1314         if (INTEL_INFO(dev)->gen >= 6) {
1315                 ring->add_request = gen6_add_request;
1316                 ring->irq_get = gen6_render_ring_get_irq;
1317                 ring->irq_put = gen6_render_ring_put_irq;
1318         } else if (IS_GEN5(dev)) {
1319                 ring->add_request = pc_render_add_request;
1320                 ring->get_seqno = pc_render_get_seqno;
1321         }
1322
1323         ring->dev = dev;
1324         INIT_LIST_HEAD(&ring->active_list);
1325         INIT_LIST_HEAD(&ring->request_list);
1326         INIT_LIST_HEAD(&ring->gpu_write_list);
1327
1328         ring->size = size;
1329         ring->effective_size = ring->size;
1330         if (IS_I830(ring->dev))
1331                 ring->effective_size -= 128;
1332
1333         ring->map.offset = start;
1334         ring->map.size = size;
1335         ring->map.type = 0;
1336         ring->map.flags = 0;
1337         ring->map.mtrr = 0;
1338
1339         drm_core_ioremap_wc(&ring->map, dev);
1340         if (ring->map.handle == NULL) {
1341                 DRM_ERROR("can not ioremap virtual address for"
1342                           " ring buffer\n");
1343                 return -ENOMEM;
1344         }
1345
1346         ring->virtual_start = (void __force __iomem *)ring->map.handle;
1347         return 0;
1348 }
1349
1350 int intel_init_bsd_ring_buffer(struct drm_device *dev)
1351 {
1352         drm_i915_private_t *dev_priv = dev->dev_private;
1353         struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
1354
1355         if (IS_GEN6(dev))
1356                 *ring = gen6_bsd_ring;
1357         else
1358                 *ring = bsd_ring;
1359
1360         return intel_init_ring_buffer(dev, ring);
1361 }
1362
1363 int intel_init_blt_ring_buffer(struct drm_device *dev)
1364 {
1365         drm_i915_private_t *dev_priv = dev->dev_private;
1366         struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
1367
1368         *ring = gen6_blt_ring;
1369
1370         return intel_init_ring_buffer(dev, ring);
1371 }