drm: updated DRM map patch for 32/64 bit systems
[linux-2.6.git] / drivers / char / drm / mga_dma.c
1 /* mga_dma.c -- DMA support for mga g200/g400 -*- linux-c -*-
2  * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
3  *
4  * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6  * All Rights Reserved.
7  *
8  * Permission is hereby granted, free of charge, to any person obtaining a
9  * copy of this software and associated documentation files (the "Software"),
10  * to deal in the Software without restriction, including without limitation
11  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12  * and/or sell copies of the Software, and to permit persons to whom the
13  * Software is furnished to do so, subject to the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the next
16  * paragraph) shall be included in all copies or substantial portions of the
17  * Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
22  * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25  * DEALINGS IN THE SOFTWARE.
26  */
27
28 /**
29  * \file mga_dma.c
30  * DMA support for MGA G200 / G400.
31  * 
32  * \author Rickard E. (Rik) Faith <faith@valinux.com>
33  * \author Jeff Hartmann <jhartmann@valinux.com>
34  * \author Keith Whitwell <keith@tungstengraphics.com>
35  * \author Gareth Hughes <gareth@valinux.com>
36  */
37
38 #include "drmP.h"
39 #include "drm.h"
40 #include "drm_sarea.h"
41 #include "mga_drm.h"
42 #include "mga_drv.h"
43
44 #define MGA_DEFAULT_USEC_TIMEOUT        10000
45 #define MGA_FREELIST_DEBUG              0
46
47 static int mga_do_cleanup_dma( drm_device_t *dev );
48
49 /* ================================================================
50  * Engine control
51  */
52
53 int mga_do_wait_for_idle( drm_mga_private_t *dev_priv )
54 {
55         u32 status = 0;
56         int i;
57         DRM_DEBUG( "\n" );
58
59         for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) {
60                 status = MGA_READ( MGA_STATUS ) & MGA_ENGINE_IDLE_MASK;
61                 if ( status == MGA_ENDPRDMASTS ) {
62                         MGA_WRITE8( MGA_CRTC_INDEX, 0 );
63                         return 0;
64                 }
65                 DRM_UDELAY( 1 );
66         }
67
68 #if MGA_DMA_DEBUG
69         DRM_ERROR( "failed!\n" );
70         DRM_INFO( "   status=0x%08x\n", status );
71 #endif
72         return DRM_ERR(EBUSY);
73 }
74
75 static int mga_do_dma_reset( drm_mga_private_t *dev_priv )
76 {
77         drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
78         drm_mga_primary_buffer_t *primary = &dev_priv->prim;
79
80         DRM_DEBUG( "\n" );
81
82         /* The primary DMA stream should look like new right about now.
83          */
84         primary->tail = 0;
85         primary->space = primary->size;
86         primary->last_flush = 0;
87
88         sarea_priv->last_wrap = 0;
89
90         /* FIXME: Reset counters, buffer ages etc...
91          */
92
93         /* FIXME: What else do we need to reinitialize?  WARP stuff?
94          */
95
96         return 0;
97 }
98
99 /* ================================================================
100  * Primary DMA stream
101  */
102
/* Flush the primary DMA stream up to the current tail.
 *
 * Waits (bounded by usec_timeout) for the engine to go idle, emits a block
 * of DMAPAD commands as required padding, recomputes the available space
 * from the hardware read pointer, and finally writes PRIMEND to make the
 * hardware consume the stream.  Bails out early when nothing has been
 * queued since the last flush.
 */
void mga_do_dma_flush( drm_mga_private_t *dev_priv )
{
        drm_mga_primary_buffer_t *primary = &dev_priv->prim;
        u32 head, tail;
        u32 status = 0;
        int i;
        DMA_LOCALS;
        DRM_DEBUG( "\n" );

        /* We need to wait so that we can do an safe flush */
        for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) {
                status = MGA_READ( MGA_STATUS ) & MGA_ENGINE_IDLE_MASK;
                if ( status == MGA_ENDPRDMASTS ) break;
                DRM_UDELAY( 1 );
        }

        /* Nothing queued since the previous flush -- nothing to kick. */
        if ( primary->tail == primary->last_flush ) {
                DRM_DEBUG( "   bailing out...\n" );
                return;
        }

        /* Bus address of the current end of the stream. */
        tail = primary->tail + dev_priv->primary->offset;

        /* We need to pad the stream between flushes, as the card
         * actually (partially?) reads the first of these commands.
         * See page 4-16 in the G400 manual, middle of the page or so.
         */
        BEGIN_DMA( 1 );

        DMA_BLOCK( MGA_DMAPAD,  0x00000000,
                   MGA_DMAPAD,  0x00000000,
                   MGA_DMAPAD,  0x00000000,
                   MGA_DMAPAD,  0x00000000 );

        ADVANCE_DMA();

        primary->last_flush = primary->tail;

        /* Recompute free space from the hardware read pointer. */
        head = MGA_READ( MGA_PRIMADDRESS );

        if ( head <= tail ) {
                /* Reader is behind us: free space runs to the buffer end. */
                primary->space = primary->size - primary->tail;
        } else {
                /* Reader is ahead of us: free space is the gap up to it. */
                primary->space = head - tail;
        }

        DRM_DEBUG( "   head = 0x%06lx\n", head - dev_priv->primary->offset );
        DRM_DEBUG( "   tail = 0x%06lx\n", tail - dev_priv->primary->offset );
        DRM_DEBUG( "  space = 0x%06x\n", primary->space );

        /* Make sure the stream writes reach memory before kicking the
         * engine with the new end pointer.
         */
        mga_flush_write_combine();
        MGA_WRITE(MGA_PRIMEND, tail | dev_priv->dma_access);

        DRM_DEBUG( "done.\n" );
}
158
/* Begin wrapping the primary DMA stream back to the start of the buffer.
 *
 * Emits a padding block, rewinds the software tail/flush state, bumps the
 * software wrap counter, recomputes free space against the hardware read
 * pointer, and kicks PRIMEND.  Sets bit 0 of primary->wrapped; the wrap is
 * completed later by mga_do_dma_wrap_end().
 */
void mga_do_dma_wrap_start( drm_mga_private_t *dev_priv )
{
        drm_mga_primary_buffer_t *primary = &dev_priv->prim;
        u32 head, tail;
        DMA_LOCALS;
        DRM_DEBUG( "\n" );

        BEGIN_DMA_WRAP();

        /* Padding block, same requirement as in mga_do_dma_flush(). */
        DMA_BLOCK( MGA_DMAPAD,  0x00000000,
                   MGA_DMAPAD,  0x00000000,
                   MGA_DMAPAD,  0x00000000,
                   MGA_DMAPAD,  0x00000000 );

        ADVANCE_DMA();

        /* Bus address of the end of the stream before rewinding. */
        tail = primary->tail + dev_priv->primary->offset;

        primary->tail = 0;
        primary->last_flush = 0;
        primary->last_wrap++;

        head = MGA_READ( MGA_PRIMADDRESS );

        if ( head == dev_priv->primary->offset ) {
                /* Reader is at the very start: the whole buffer is free. */
                primary->space = primary->size;
        } else {
                primary->space = head - dev_priv->primary->offset;
        }

        DRM_DEBUG( "   head = 0x%06lx\n",
                  head - dev_priv->primary->offset );
        DRM_DEBUG( "   tail = 0x%06x\n", primary->tail );
        DRM_DEBUG( "   wrap = %d\n", primary->last_wrap );
        DRM_DEBUG( "  space = 0x%06x\n", primary->space );

        /* Flush write-combined writes before kicking the engine. */
        mga_flush_write_combine();
        MGA_WRITE(MGA_PRIMEND, tail | dev_priv->dma_access);

        /* Mark the wrap as pending; cleared in mga_do_dma_wrap_end(). */
        set_bit( 0, &primary->wrapped );
        DRM_DEBUG( "done.\n" );
}
201
202 void mga_do_dma_wrap_end( drm_mga_private_t *dev_priv )
203 {
204         drm_mga_primary_buffer_t *primary = &dev_priv->prim;
205         drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
206         u32 head = dev_priv->primary->offset;
207         DRM_DEBUG( "\n" );
208
209         sarea_priv->last_wrap++;
210         DRM_DEBUG( "   wrap = %d\n", sarea_priv->last_wrap );
211
212         mga_flush_write_combine();
213         MGA_WRITE( MGA_PRIMADDRESS, head | MGA_DMA_GENERAL );
214
215         clear_bit( 0, &primary->wrapped );
216         DRM_DEBUG( "done.\n" );
217 }
218
219
220 /* ================================================================
221  * Freelist management
222  */
223
224 #define MGA_BUFFER_USED         ~0
225 #define MGA_BUFFER_FREE         0
226
227 #if MGA_FREELIST_DEBUG
228 static void mga_freelist_print( drm_device_t *dev )
229 {
230         drm_mga_private_t *dev_priv = dev->dev_private;
231         drm_mga_freelist_t *entry;
232
233         DRM_INFO( "\n" );
234         DRM_INFO( "current dispatch: last=0x%x done=0x%x\n",
235                   dev_priv->sarea_priv->last_dispatch,
236                   (unsigned int)(MGA_READ( MGA_PRIMADDRESS ) -
237                                  dev_priv->primary->offset) );
238         DRM_INFO( "current freelist:\n" );
239
240         for ( entry = dev_priv->head->next ; entry ; entry = entry->next ) {
241                 DRM_INFO( "   %p   idx=%2d  age=0x%x 0x%06lx\n",
242                           entry, entry->buf->idx, entry->age.head,
243                           entry->age.head - dev_priv->primary->offset );
244         }
245         DRM_INFO( "\n" );
246 }
247 #endif
248
249 static int mga_freelist_init( drm_device_t *dev, drm_mga_private_t *dev_priv )
250 {
251         drm_device_dma_t *dma = dev->dma;
252         drm_buf_t *buf;
253         drm_mga_buf_priv_t *buf_priv;
254         drm_mga_freelist_t *entry;
255         int i;
256         DRM_DEBUG( "count=%d\n", dma->buf_count );
257
258         dev_priv->head = drm_alloc( sizeof(drm_mga_freelist_t),
259                                      DRM_MEM_DRIVER );
260         if ( dev_priv->head == NULL )
261                 return DRM_ERR(ENOMEM);
262
263         memset( dev_priv->head, 0, sizeof(drm_mga_freelist_t) );
264         SET_AGE( &dev_priv->head->age, MGA_BUFFER_USED, 0 );
265
266         for ( i = 0 ; i < dma->buf_count ; i++ ) {
267                 buf = dma->buflist[i];
268                 buf_priv = buf->dev_private;
269
270                 entry = drm_alloc( sizeof(drm_mga_freelist_t),
271                                     DRM_MEM_DRIVER );
272                 if ( entry == NULL )
273                         return DRM_ERR(ENOMEM);
274
275                 memset( entry, 0, sizeof(drm_mga_freelist_t) );
276
277                 entry->next = dev_priv->head->next;
278                 entry->prev = dev_priv->head;
279                 SET_AGE( &entry->age, MGA_BUFFER_FREE, 0 );
280                 entry->buf = buf;
281
282                 if ( dev_priv->head->next != NULL )
283                         dev_priv->head->next->prev = entry;
284                 if ( entry->next == NULL )
285                         dev_priv->tail = entry;
286
287                 buf_priv->list_entry = entry;
288                 buf_priv->discard = 0;
289                 buf_priv->dispatched = 0;
290
291                 dev_priv->head->next = entry;
292         }
293
294         return 0;
295 }
296
297 static void mga_freelist_cleanup( drm_device_t *dev )
298 {
299         drm_mga_private_t *dev_priv = dev->dev_private;
300         drm_mga_freelist_t *entry;
301         drm_mga_freelist_t *next;
302         DRM_DEBUG( "\n" );
303
304         entry = dev_priv->head;
305         while ( entry ) {
306                 next = entry->next;
307                 drm_free( entry, sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER );
308                 entry = next;
309         }
310
311         dev_priv->head = dev_priv->tail = NULL;
312 }
313
#if 0
/* FIXME: Still needed?
 */
/* Compiled out: would reset every buffer's freelist age to FREE.  Kept
 * for reference until the FIXME above is resolved.
 */
static void mga_freelist_reset( drm_device_t *dev )
{
        drm_device_dma_t *dma = dev->dma;
        drm_buf_t *buf;
        drm_mga_buf_priv_t *buf_priv;
        int i;

        for ( i = 0 ; i < dma->buf_count ; i++ ) {
                buf = dma->buflist[i];
                buf_priv = buf->dev_private;
                SET_AGE( &buf_priv->list_entry->age,
                         MGA_BUFFER_FREE, 0 );
        }
}
#endif
332
333 static drm_buf_t *mga_freelist_get( drm_device_t *dev )
334 {
335         drm_mga_private_t *dev_priv = dev->dev_private;
336         drm_mga_freelist_t *next;
337         drm_mga_freelist_t *prev;
338         drm_mga_freelist_t *tail = dev_priv->tail;
339         u32 head, wrap;
340         DRM_DEBUG( "\n" );
341
342         head = MGA_READ( MGA_PRIMADDRESS );
343         wrap = dev_priv->sarea_priv->last_wrap;
344
345         DRM_DEBUG( "   tail=0x%06lx %d\n",
346                    tail->age.head ?
347                    tail->age.head - dev_priv->primary->offset : 0,
348                    tail->age.wrap );
349         DRM_DEBUG( "   head=0x%06lx %d\n",
350                    head - dev_priv->primary->offset, wrap );
351
352         if ( TEST_AGE( &tail->age, head, wrap ) ) {
353                 prev = dev_priv->tail->prev;
354                 next = dev_priv->tail;
355                 prev->next = NULL;
356                 next->prev = next->next = NULL;
357                 dev_priv->tail = prev;
358                 SET_AGE( &next->age, MGA_BUFFER_USED, 0 );
359                 return next->buf;
360         }
361
362         DRM_DEBUG( "returning NULL!\n" );
363         return NULL;
364 }
365
366 int mga_freelist_put( drm_device_t *dev, drm_buf_t *buf )
367 {
368         drm_mga_private_t *dev_priv = dev->dev_private;
369         drm_mga_buf_priv_t *buf_priv = buf->dev_private;
370         drm_mga_freelist_t *head, *entry, *prev;
371
372         DRM_DEBUG( "age=0x%06lx wrap=%d\n",
373                    buf_priv->list_entry->age.head -
374                    dev_priv->primary->offset,
375                    buf_priv->list_entry->age.wrap );
376
377         entry = buf_priv->list_entry;
378         head = dev_priv->head;
379
380         if ( buf_priv->list_entry->age.head == MGA_BUFFER_USED ) {
381                 SET_AGE( &entry->age, MGA_BUFFER_FREE, 0 );
382                 prev = dev_priv->tail;
383                 prev->next = entry;
384                 entry->prev = prev;
385                 entry->next = NULL;
386         } else {
387                 prev = head->next;
388                 head->next = entry;
389                 prev->prev = entry;
390                 entry->prev = head;
391                 entry->next = prev;
392         }
393
394         return 0;
395 }
396
397
398 /* ================================================================
399  * DMA initialization, cleanup
400  */
401
402
403 int mga_driver_preinit(drm_device_t *dev, unsigned long flags)
404 {
405         drm_mga_private_t * dev_priv;
406
407         dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER);
408         if (!dev_priv)
409                 return DRM_ERR(ENOMEM);
410
411         dev->dev_private = (void *)dev_priv;
412         memset(dev_priv, 0, sizeof(drm_mga_private_t));
413
414         dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT;
415         dev_priv->chipset = flags;
416
417         return 0;
418 }
419
420 /**
421  * Bootstrap the driver for AGP DMA.
422  * 
423  * \todo
424  * Investigate whether there is any benifit to storing the WARP microcode in
425  * AGP memory.  If not, the microcode may as well always be put in PCI
426  * memory.
427  *
428  * \todo
429  * This routine needs to set dma_bs->agp_mode to the mode actually configured
430  * in the hardware.  Looking just at the Linux AGP driver code, I don't see
431  * an easy way to determine this.
432  *
433  * \sa mga_do_dma_bootstrap, mga_do_pci_dma_bootstrap
434  */
static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
                                    drm_mga_dma_bootstrap_t * dma_bs)
{
        drm_mga_private_t * const dev_priv = (drm_mga_private_t *) dev->dev_private;
        const unsigned int warp_size = mga_warp_microcode_size(dev_priv);
        int err;
        unsigned  offset;
        /* Total size of the secondary (vertex) DMA buffer pool. */
        const unsigned secondary_size = dma_bs->secondary_bin_count
                * dma_bs->secondary_bin_size;
        const unsigned agp_size = (dma_bs->agp_size << 20);
        drm_buf_desc_t req;
        drm_agp_mode_t mode;
        drm_agp_info_t info;

        /* NOTE(review): all error paths below return without releasing the
         * resources acquired so far; presumably the caller's
         * mga_do_cleanup_dma() handles that -- verify.
         */
        
        /* Acquire AGP. */
        err = drm_agp_acquire(dev);
        if (err) {
                DRM_ERROR("Unable to acquire AGP\n");
                return err;
        }

        err = drm_agp_info(dev, &info);
        if (err) {
                DRM_ERROR("Unable to get AGP info\n");
                return err;
        }

        /* Merge the requested AGP mode into the low bits of the reported
         * mode word, then enable AGP with it.
         */
        mode.mode = (info.mode & ~0x07) | dma_bs->agp_mode;
        err = drm_agp_enable(dev, mode);
        if (err) {
                DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode);
                return err;
        }


        /* In addition to the usual AGP mode configuration, the G200 AGP cards
         * need to have the AGP mode "manually" set.
         */

        if (dev_priv->chipset == MGA_CARD_TYPE_G200) {
                if (mode.mode & 0x02) {
                        MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_ENABLE);
                }
                else {
                        MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_DISABLE);
                }
        }


        /* Allocate and bind AGP memory. */
        dev_priv->agp_pages = agp_size / PAGE_SIZE;
        dev_priv->agp_mem = drm_alloc_agp( dev, dev_priv->agp_pages, 0 );
        if (dev_priv->agp_mem == NULL) {
                dev_priv->agp_pages = 0;
                DRM_ERROR("Unable to allocate %uMB AGP memory\n",
                          dma_bs->agp_size);
                return DRM_ERR(ENOMEM);
        }
                
        err = drm_bind_agp( dev_priv->agp_mem, 0 );
        if (err) {
                DRM_ERROR("Unable to bind AGP memory\n");
                return err;
        }

        /* Carve the AGP aperture into consecutive regions: WARP microcode,
         * primary DMA buffer, secondary DMA buffers, then everything left
         * over becomes the AGP texture region.
         */
        offset = 0;
        err = drm_addmap( dev, offset, warp_size,
                          _DRM_AGP, _DRM_READ_ONLY, & dev_priv->warp );
        if (err) {
                DRM_ERROR("Unable to map WARP microcode\n");
                return err;
        }

        offset += warp_size;
        err = drm_addmap( dev, offset, dma_bs->primary_size,
                          _DRM_AGP, _DRM_READ_ONLY, & dev_priv->primary );
        if (err) {
                DRM_ERROR("Unable to map primary DMA region\n");
                return err;
        }

        offset += dma_bs->primary_size;
        err = drm_addmap( dev, offset, secondary_size,
                          _DRM_AGP, 0, & dev->agp_buffer_map );
        if (err) {
                DRM_ERROR("Unable to map secondary DMA region\n");
                return err;
        }

        /* Register the secondary region as the pool of DMA buffers. */
        (void) memset( &req, 0, sizeof(req) );
        req.count = dma_bs->secondary_bin_count;
        req.size = dma_bs->secondary_bin_size;
        req.flags = _DRM_AGP_BUFFER;
        req.agp_start = offset;

        err = drm_addbufs_agp( dev, & req );
        if (err) {
                DRM_ERROR("Unable to add secondary DMA buffers\n");
                return err;
        }

        offset += secondary_size;
        err = drm_addmap( dev, offset, agp_size - offset,
                          _DRM_AGP, 0, & dev_priv->agp_textures );
        if (err) {
                DRM_ERROR("Unable to map AGP texture region\n");
                return err;
        }

        /* Map the three kernel-accessed regions into kernel space. */
        drm_core_ioremap(dev_priv->warp, dev);
        drm_core_ioremap(dev_priv->primary, dev);
        drm_core_ioremap(dev->agp_buffer_map, dev);

        if (!dev_priv->warp->handle ||
            !dev_priv->primary->handle || !dev->agp_buffer_map->handle) {
                DRM_ERROR("failed to ioremap agp regions! (%p, %p, %p)\n",
                          dev_priv->warp->handle, dev_priv->primary->handle,
                          dev->agp_buffer_map->handle);
                return DRM_ERR(ENOMEM);
        }

        /* Route DMA transfers through the AGP aperture. */
        dev_priv->dma_access = MGA_PAGPXFER;
        dev_priv->wagp_enable = MGA_WAGP_ENABLE;

        DRM_INFO("Initialized card for AGP DMA.\n");
        return 0;
}
563
564 /**
565  * Bootstrap the driver for PCI DMA.
566  * 
567  * \todo
568  * The algorithm for decreasing the size of the primary DMA buffer could be
569  * better.  The size should be rounded up to the nearest page size, then
570  * decrease the request size by a single page each pass through the loop.
571  *
572  * \todo
573  * Determine whether the maximum address passed to drm_pci_alloc is correct.
574  * The same goes for drm_addbufs_pci.
575  * 
576  * \sa mga_do_dma_bootstrap, mga_do_agp_dma_bootstrap
577  */
578 static int mga_do_pci_dma_bootstrap(drm_device_t * dev,
579                                     drm_mga_dma_bootstrap_t * dma_bs)
580 {
581         drm_mga_private_t * const dev_priv = (drm_mga_private_t *) dev->dev_private;
582         const unsigned int warp_size = mga_warp_microcode_size(dev_priv);
583         unsigned int primary_size;
584         unsigned int bin_count;
585         int err;
586         drm_buf_desc_t req;
587
588         
589         if (dev->dma == NULL) {
590                 DRM_ERROR("dev->dma is NULL\n");
591                 return DRM_ERR(EFAULT);
592         }
593
594         /* The proper alignment is 0x100 for this mapping */
595         err = drm_addmap(dev, 0, warp_size, _DRM_CONSISTENT,
596                          _DRM_READ_ONLY, &dev_priv->warp);
597         if (err != 0) {
598                 DRM_ERROR("Unable to create mapping for WARP microcode\n");
599                 return err;
600         }
601
602         /* Other than the bottom two bits being used to encode other
603          * information, there don't appear to be any restrictions on the
604          * alignment of the primary or secondary DMA buffers.
605          */
606
607         for ( primary_size = dma_bs->primary_size
608               ; primary_size != 0
609               ; primary_size >>= 1 ) {
610                 /* The proper alignment for this mapping is 0x04 */
611                 err = drm_addmap(dev, 0, primary_size, _DRM_CONSISTENT,
612                                  _DRM_READ_ONLY, &dev_priv->primary);
613                 if (!err)
614                         break;
615         }
616
617         if (err != 0) {
618                 DRM_ERROR("Unable to allocate primary DMA region\n");
619                 return DRM_ERR(ENOMEM);
620         }
621
622         if (dev_priv->primary->size != dma_bs->primary_size) {
623                 DRM_INFO("Primary DMA buffer size reduced from %u to %u.\n",
624                          dma_bs->primary_size, 
625                          (unsigned) dev_priv->primary->size);
626                 dma_bs->primary_size = dev_priv->primary->size;
627         }
628
629         for ( bin_count = dma_bs->secondary_bin_count
630               ; bin_count > 0 
631               ; bin_count-- ) {
632                 (void) memset( &req, 0, sizeof(req) );
633                 req.count = bin_count;
634                 req.size = dma_bs->secondary_bin_size;
635
636                 err = drm_addbufs_pci( dev, & req );
637                 if (!err) {
638                         break;
639                 }
640         }
641         
642         if (bin_count == 0) {
643                 DRM_ERROR("Unable to add secondary DMA buffers\n");
644                 return err;
645         }
646
647         if (bin_count != dma_bs->secondary_bin_count) {
648                 DRM_INFO("Secondary PCI DMA buffer bin count reduced from %u "
649                          "to %u.\n", dma_bs->secondary_bin_count, bin_count);
650
651                 dma_bs->secondary_bin_count = bin_count;
652         }
653
654         dev_priv->dma_access = 0;
655         dev_priv->wagp_enable = 0;
656
657         dma_bs->agp_mode = 0;
658
659         DRM_INFO("Initialized card for PCI DMA.\n");
660         return 0;
661 }
662
663
664 static int mga_do_dma_bootstrap(drm_device_t * dev,
665                                 drm_mga_dma_bootstrap_t * dma_bs)
666 {
667         const int is_agp = (dma_bs->agp_mode != 0) && drm_device_is_agp(dev);
668         int err;
669         drm_mga_private_t * const dev_priv =
670                 (drm_mga_private_t *) dev->dev_private;
671
672
673         dev_priv->used_new_dma_init = 1;
674
675         /* The first steps are the same for both PCI and AGP based DMA.  Map
676          * the cards MMIO registers and map a status page.
677          */
678         err = drm_addmap( dev, dev_priv->mmio_base, dev_priv->mmio_size,
679                           _DRM_REGISTERS, _DRM_READ_ONLY, & dev_priv->mmio );
680         if (err) {
681                 DRM_ERROR("Unable to map MMIO region\n");
682                 return err;
683         }
684
685
686         err = drm_addmap( dev, 0, SAREA_MAX, _DRM_SHM,
687                           _DRM_READ_ONLY | _DRM_LOCKED | _DRM_KERNEL,
688                           & dev_priv->status );
689         if (err) {
690                 DRM_ERROR("Unable to map status region\n");
691                 return err;
692         }
693
694
695         /* The DMA initialization procedure is slightly different for PCI and
696          * AGP cards.  AGP cards just allocate a large block of AGP memory and
697          * carve off portions of it for internal uses.  The remaining memory
698          * is returned to user-mode to be used for AGP textures.
699          */
700
701         if (is_agp) {
702                 err = mga_do_agp_dma_bootstrap(dev, dma_bs);
703         }
704         
705         /* If we attempted to initialize the card for AGP DMA but failed,
706          * clean-up any mess that may have been created.
707          */
708
709         if (err) {
710                 mga_do_cleanup_dma(dev);
711         }
712
713
714         /* Not only do we want to try and initialized PCI cards for PCI DMA,
715          * but we also try to initialized AGP cards that could not be
716          * initialized for AGP DMA.  This covers the case where we have an AGP
717          * card in a system with an unsupported AGP chipset.  In that case the
718          * card will be detected as AGP, but we won't be able to allocate any
719          * AGP memory, etc.
720          */
721
722         if (!is_agp || err) {
723                 err = mga_do_pci_dma_bootstrap(dev, dma_bs);
724         }
725
726
727         return err;
728 }
729
730 int mga_dma_bootstrap(DRM_IOCTL_ARGS)
731 {
732         DRM_DEVICE;
733         drm_mga_dma_bootstrap_t bootstrap;
734         int err;
735
736
737         DRM_COPY_FROM_USER_IOCTL(bootstrap,
738                                  (drm_mga_dma_bootstrap_t __user *) data,
739                                  sizeof(bootstrap));
740
741         err = mga_do_dma_bootstrap(dev, & bootstrap);
742         if (! err) {
743                 static const int modes[] = { 0, 1, 2, 2, 4, 4, 4, 4 };
744                 const drm_mga_private_t * const dev_priv = 
745                         (drm_mga_private_t *) dev->dev_private;
746
747                 if (dev_priv->agp_textures != NULL) {
748                         bootstrap.texture_handle = dev_priv->agp_textures->offset;
749                         bootstrap.texture_size = dev_priv->agp_textures->size;
750                 }
751                 else {
752                         bootstrap.texture_handle = 0;
753                         bootstrap.texture_size = 0;
754                 }
755
756                 bootstrap.agp_mode = modes[ bootstrap.agp_mode & 0x07 ];
757                 if (DRM_COPY_TO_USER( (void __user *) data, & bootstrap,
758                                      sizeof(bootstrap))) {
759                         err = DRM_ERR(EFAULT);
760                 }
761         }
762         else {
763                 mga_do_cleanup_dma(dev);
764         }
765
766         return err;
767 }
768
/* Finish DMA initialization from the INIT ioctl parameters.
 *
 * Copies the rendering state (clear command, surface offsets/pitches,
 * texture region) out of the init block, resolves the memory maps (either
 * already created by the new bootstrap path, or looked up by offset for
 * the old path), installs and starts the WARP microcode, programs the
 * primary DMA registers, and builds the buffer freelist.
 *
 * Returns 0 on success or a DRM error code.
 */
static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init )
{
        drm_mga_private_t *dev_priv;
        int ret;
        DRM_DEBUG( "\n" );


        dev_priv = dev->dev_private;

        /* Clear command depends on the memory type (SGRAM block clear vs
         * plain rectangle restore).
         */
        if (init->sgram) {
                dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_BLK;
        } else {
                dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_RSTR;
        }
        dev_priv->maccess       = init->maccess;

        dev_priv->fb_cpp        = init->fb_cpp;
        dev_priv->front_offset  = init->front_offset;
        dev_priv->front_pitch   = init->front_pitch;
        dev_priv->back_offset   = init->back_offset;
        dev_priv->back_pitch    = init->back_pitch;

        dev_priv->depth_cpp     = init->depth_cpp;
        dev_priv->depth_offset  = init->depth_offset;
        dev_priv->depth_pitch   = init->depth_pitch;

        /* FIXME: Need to support AGP textures...
         */
        dev_priv->texture_offset = init->texture_offset[0];
        dev_priv->texture_size = init->texture_size[0];

        DRM_GETSAREA();

        if (!dev_priv->sarea) {
                DRM_ERROR("failed to find sarea!\n");
                return DRM_ERR(EINVAL);
        }

        /* Old init path: the maps were created by user-space, so look each
         * one up by the offset supplied in the init block and ioremap the
         * ones the kernel touches.  (The new bootstrap path already did
         * all of this in mga_do_dma_bootstrap().)
         */
        if (! dev_priv->used_new_dma_init) {
                dev_priv->status = drm_core_findmap(dev, init->status_offset);
                if (!dev_priv->status) {
                        DRM_ERROR("failed to find status page!\n");
                        return DRM_ERR(EINVAL);
                }
                dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
                if (!dev_priv->mmio) {
                        DRM_ERROR("failed to find mmio region!\n");
                        return DRM_ERR(EINVAL);
                }
                dev_priv->warp = drm_core_findmap(dev, init->warp_offset);
                if (!dev_priv->warp) {
                        DRM_ERROR("failed to find warp microcode region!\n");
                        return DRM_ERR(EINVAL);
                }
                dev_priv->primary = drm_core_findmap(dev, init->primary_offset);
                if (!dev_priv->primary) {
                        DRM_ERROR("failed to find primary dma region!\n");
                        return DRM_ERR(EINVAL);
                }
                dev->agp_buffer_token = init->buffers_offset;
                dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
                if (!dev->agp_buffer_map) {
                        DRM_ERROR("failed to find dma buffer region!\n");
                        return DRM_ERR(EINVAL);
                }

                drm_core_ioremap(dev_priv->warp, dev);
                drm_core_ioremap(dev_priv->primary, dev);
                drm_core_ioremap(dev->agp_buffer_map, dev);
        }

        /* The private SAREA lives at a caller-supplied offset inside the
         * shared SAREA mapping.
         */
        dev_priv->sarea_priv =
                (drm_mga_sarea_t *)((u8 *)dev_priv->sarea->handle +
                                    init->sarea_priv_offset);

        /* The buffer map only needs a kernel handle when DMA goes through
         * the AGP aperture (dma_access != 0).
         */
        if (!dev_priv->warp->handle ||
            !dev_priv->primary->handle ||
            ((dev_priv->dma_access != 0) &&
             ((dev->agp_buffer_map == NULL) ||
              (dev->agp_buffer_map->handle == NULL)))) {
                DRM_ERROR("failed to ioremap agp regions!\n");
                return DRM_ERR(ENOMEM);
        }

        ret = mga_warp_install_microcode(dev_priv);
        if (ret < 0) {
                DRM_ERROR("failed to install WARP ucode!\n");
                return ret;
        }

        ret = mga_warp_init(dev_priv);
        if (ret < 0) {
                DRM_ERROR("failed to init WARP engine!\n");
                return ret;
        }

        dev_priv->prim.status = (u32 *)dev_priv->status->handle;

        mga_do_wait_for_idle( dev_priv );

        /* Init the primary DMA registers.
         */
        MGA_WRITE( MGA_PRIMADDRESS,
                   dev_priv->primary->offset | MGA_DMA_GENERAL );
#if 0
        MGA_WRITE( MGA_PRIMPTR,
                   virt_to_bus((void *)dev_priv->prim.status) |
                   MGA_PRIMPTREN0 |     /* Soft trap, SECEND, SETUPEND */
                   MGA_PRIMPTREN1 );    /* DWGSYNC */
#endif

        /* Software view of the primary ring: empty, no wrap pending. */
        dev_priv->prim.start = (u8 *)dev_priv->primary->handle;
        dev_priv->prim.end = ((u8 *)dev_priv->primary->handle
                              + dev_priv->primary->size);
        dev_priv->prim.size = dev_priv->primary->size;

        dev_priv->prim.tail = 0;
        dev_priv->prim.space = dev_priv->prim.size;
        dev_priv->prim.wrapped = 0;

        dev_priv->prim.last_flush = 0;
        dev_priv->prim.last_wrap = 0;

        /* High-water mark at which a wrap is forced. */
        dev_priv->prim.high_mark = 256 * DMA_BLOCK_SIZE;

        dev_priv->prim.status[0] = dev_priv->primary->offset;
        dev_priv->prim.status[1] = 0;

        dev_priv->sarea_priv->last_wrap = 0;
        dev_priv->sarea_priv->last_frame.head = 0;
        dev_priv->sarea_priv->last_frame.wrap = 0;

        if (mga_freelist_init(dev, dev_priv) < 0) {
                DRM_ERROR("could not initialize freelist\n");
                return DRM_ERR(ENOMEM);
        }

        return 0;
}
908
909 static int mga_do_cleanup_dma( drm_device_t *dev )
910 {
911         int err = 0;
912         DRM_DEBUG("\n");
913
914         /* Make sure interrupts are disabled here because the uninstall ioctl
915          * may not have been called from userspace and after dev_private
916          * is freed, it's too late.
917          */
918         if ( dev->irq_enabled ) drm_irq_uninstall(dev);
919
920         if ( dev->dev_private ) {
921                 drm_mga_private_t *dev_priv = dev->dev_private;
922
923                 if ((dev_priv->warp != NULL) 
924                     && (dev_priv->mmio->type != _DRM_CONSISTENT))
925                         drm_core_ioremapfree(dev_priv->warp, dev);
926
927                 if ((dev_priv->primary != NULL) 
928                     && (dev_priv->primary->type != _DRM_CONSISTENT))
929                         drm_core_ioremapfree(dev_priv->primary, dev);
930
931                 if (dev->agp_buffer_map != NULL)
932                         drm_core_ioremapfree(dev->agp_buffer_map, dev);
933
934                 if (dev_priv->used_new_dma_init) {
935                         if (dev_priv->agp_mem != NULL) {
936                                 dev_priv->agp_textures = NULL;
937                                 drm_unbind_agp(dev_priv->agp_mem);
938
939                                 drm_free_agp(dev_priv->agp_mem, dev_priv->agp_pages);
940                                 dev_priv->agp_pages = 0;
941                                 dev_priv->agp_mem = NULL;
942                         }
943
944                         if ((dev->agp != NULL) && dev->agp->acquired) {
945                                 err = drm_agp_release(dev);
946                         }
947
948                         dev_priv->used_new_dma_init = 0;
949                 }
950
951                 dev_priv->warp = NULL;
952                 dev_priv->primary = NULL;
953                 dev_priv->mmio = NULL;
954                 dev_priv->status = NULL;
955                 dev_priv->sarea = NULL;
956                 dev_priv->sarea_priv = NULL;
957                 dev->agp_buffer_map = NULL;
958
959                 memset(&dev_priv->prim, 0, sizeof(dev_priv->prim));
960                 dev_priv->warp_pipe = 0;
961                 memset(dev_priv->warp_pipe_phys, 0, sizeof(dev_priv->warp_pipe_phys));
962
963                 if (dev_priv->head != NULL) {
964                         mga_freelist_cleanup(dev);
965                 }
966         }
967
968         return 0;
969 }
970
971 int mga_dma_init( DRM_IOCTL_ARGS )
972 {
973         DRM_DEVICE;
974         drm_mga_init_t init;
975         int err;
976
977         LOCK_TEST_WITH_RETURN( dev, filp );
978
979         DRM_COPY_FROM_USER_IOCTL(init, (drm_mga_init_t __user *) data,
980                                  sizeof(init));
981
982         switch ( init.func ) {
983         case MGA_INIT_DMA:
984                 err = mga_do_init_dma(dev, &init);
985                 if (err) {
986                         (void) mga_do_cleanup_dma(dev);
987                 }
988                 return err;
989         case MGA_CLEANUP_DMA:
990                 return mga_do_cleanup_dma( dev );
991         }
992
993         return DRM_ERR(EINVAL);
994 }
995
996
997 /* ================================================================
998  * Primary DMA stream management
999  */
1000
1001 int mga_dma_flush( DRM_IOCTL_ARGS )
1002 {
1003         DRM_DEVICE;
1004         drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
1005         drm_lock_t lock;
1006
1007         LOCK_TEST_WITH_RETURN( dev, filp );
1008
1009         DRM_COPY_FROM_USER_IOCTL( lock, (drm_lock_t __user *)data, sizeof(lock) );
1010
1011         DRM_DEBUG( "%s%s%s\n",
1012                    (lock.flags & _DRM_LOCK_FLUSH) ?     "flush, " : "",
1013                    (lock.flags & _DRM_LOCK_FLUSH_ALL) ? "flush all, " : "",
1014                    (lock.flags & _DRM_LOCK_QUIESCENT) ? "idle, " : "" );
1015
1016         WRAP_WAIT_WITH_RETURN( dev_priv );
1017
1018         if ( lock.flags & (_DRM_LOCK_FLUSH | _DRM_LOCK_FLUSH_ALL) ) {
1019                 mga_do_dma_flush( dev_priv );
1020         }
1021
1022         if ( lock.flags & _DRM_LOCK_QUIESCENT ) {
1023 #if MGA_DMA_DEBUG
1024                 int ret = mga_do_wait_for_idle( dev_priv );
1025                 if ( ret < 0 )
1026                         DRM_INFO( "%s: -EBUSY\n", __FUNCTION__ );
1027                 return ret;
1028 #else
1029                 return mga_do_wait_for_idle( dev_priv );
1030 #endif
1031         } else {
1032                 return 0;
1033         }
1034 }
1035
1036 int mga_dma_reset( DRM_IOCTL_ARGS )
1037 {
1038         DRM_DEVICE;
1039         drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
1040
1041         LOCK_TEST_WITH_RETURN( dev, filp );
1042
1043         return mga_do_dma_reset( dev_priv );
1044 }
1045
1046
1047 /* ================================================================
1048  * DMA buffer management
1049  */
1050
1051 static int mga_dma_get_buffers( DRMFILE filp,
1052                                 drm_device_t *dev, drm_dma_t *d )
1053 {
1054         drm_buf_t *buf;
1055         int i;
1056
1057         for ( i = d->granted_count ; i < d->request_count ; i++ ) {
1058                 buf = mga_freelist_get( dev );
1059                 if ( !buf ) return DRM_ERR(EAGAIN);
1060
1061                 buf->filp = filp;
1062
1063                 if ( DRM_COPY_TO_USER( &d->request_indices[i],
1064                                    &buf->idx, sizeof(buf->idx) ) )
1065                         return DRM_ERR(EFAULT);
1066                 if ( DRM_COPY_TO_USER( &d->request_sizes[i],
1067                                    &buf->total, sizeof(buf->total) ) )
1068                         return DRM_ERR(EFAULT);
1069
1070                 d->granted_count++;
1071         }
1072         return 0;
1073 }
1074
1075 int mga_dma_buffers( DRM_IOCTL_ARGS )
1076 {
1077         DRM_DEVICE;
1078         drm_device_dma_t *dma = dev->dma;
1079         drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
1080         drm_dma_t __user *argp = (void __user *)data;
1081         drm_dma_t d;
1082         int ret = 0;
1083
1084         LOCK_TEST_WITH_RETURN( dev, filp );
1085
1086         DRM_COPY_FROM_USER_IOCTL( d, argp, sizeof(d) );
1087
1088         /* Please don't send us buffers.
1089          */
1090         if ( d.send_count != 0 ) {
1091                 DRM_ERROR( "Process %d trying to send %d buffers via drmDMA\n",
1092                            DRM_CURRENTPID, d.send_count );
1093                 return DRM_ERR(EINVAL);
1094         }
1095
1096         /* We'll send you buffers.
1097          */
1098         if ( d.request_count < 0 || d.request_count > dma->buf_count ) {
1099                 DRM_ERROR( "Process %d trying to get %d buffers (of %d max)\n",
1100                            DRM_CURRENTPID, d.request_count, dma->buf_count );
1101                 return DRM_ERR(EINVAL);
1102         }
1103
1104         WRAP_TEST_WITH_RETURN( dev_priv );
1105
1106         d.granted_count = 0;
1107
1108         if ( d.request_count ) {
1109                 ret = mga_dma_get_buffers( filp, dev, &d );
1110         }
1111
1112         DRM_COPY_TO_USER_IOCTL( argp, d, sizeof(d) );
1113
1114         return ret;
1115 }
1116
1117 /**
1118  * Called just before the module is unloaded.
1119  */
1120 int mga_driver_postcleanup(drm_device_t * dev)
1121 {
1122         drm_free(dev->dev_private, sizeof(drm_mga_private_t), DRM_MEM_DRIVER);
1123         dev->dev_private = NULL;
1124
1125         return 0;
1126 }
1127
1128 /**
1129  * Called when the last opener of the device is closed.
1130  */
1131 void mga_driver_pretakedown(drm_device_t * dev)
1132 {
1133         mga_do_cleanup_dma( dev );
1134 }
1135
1136 int mga_driver_dma_quiescent(drm_device_t *dev)
1137 {
1138         drm_mga_private_t *dev_priv = dev->dev_private;
1139         return mga_do_wait_for_idle( dev_priv );
1140 }