drm: add _DRM_CONSISTENT map type
drivers/char/drm/drm_bufs.c (linux-2.6.git)
1 /**
2  * \file drm_bufs.c
3  * Generic buffer template
4  * 
5  * \author Rickard E. (Rik) Faith <faith@valinux.com>
6  * \author Gareth Hughes <gareth@valinux.com>
7  */
8
9 /*
10  * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
11  *
12  * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
13  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
14  * All Rights Reserved.
15  *
16  * Permission is hereby granted, free of charge, to any person obtaining a
17  * copy of this software and associated documentation files (the "Software"),
18  * to deal in the Software without restriction, including without limitation
19  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
20  * and/or sell copies of the Software, and to permit persons to whom the
21  * Software is furnished to do so, subject to the following conditions:
22  *
23  * The above copyright notice and this permission notice (including the next
24  * paragraph) shall be included in all copies or substantial portions of the
25  * Software.
26  *
27  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
30  * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
31  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
32  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
33  * OTHER DEALINGS IN THE SOFTWARE.
34  */
35
36 #include <linux/vmalloc.h>
37 #include "drmP.h"
38
39 /**
40  * Compute size order.  Returns the exponent of the smallest power of two
41  * which is greater than or equal to the given number.
42  * 
43  * \param size size.
44  * \return order.
45  *
46  * \todo Can be made faster.
47  */
48 int drm_order( unsigned long size )
49 {
50         int order;
51         unsigned long tmp;
52
53         for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++)
54                 ;
55
56         if (size & (size - 1))
57                 ++order;
58
59         return order;
60 }
61 EXPORT_SYMBOL(drm_order);
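/*
 * Editorial worked example (not part of the original source): drm_order()
 * returns the exponent of the smallest power of two covering the request, so
 *
 *     drm_order(1)             == 0
 *     drm_order(PAGE_SIZE)     == PAGE_SHIFT      (12 for 4 KiB pages)
 *     drm_order(PAGE_SIZE + 1) == PAGE_SHIFT + 1
 *
 * The addbufs paths below use it to pick the dma->bufs[] bucket for a
 * requested buffer size.
 */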
62
63 #ifdef CONFIG_COMPAT
64 /*
65  * Used to allocate 32-bit handles for _DRM_SHM regions
66  * The 0x10000000 value is chosen to be out of the way of
67  * FB/register and GART physical addresses.
68  */
69 static unsigned int map32_handle = 0x10000000;
70 #endif
71
72 /**
73  * Ioctl to specify a range of memory that is available for mapping by a non-root process.
74  *
75  * \param inode device inode.
76  * \param filp file pointer.
77  * \param cmd command.
78  * \param arg pointer to a drm_map structure.
79  * \return zero on success or a negative value on error.
80  *
81  * Adjusts the memory offset to its absolute value according to the mapping
82  * type.  Adds the map to the map list drm_device::maplist.  Adds MTRRs where
83  * applicable and if supported by the kernel.
84  */
85 int drm_addmap( struct inode *inode, struct file *filp,
86                  unsigned int cmd, unsigned long arg )
87 {
88         drm_file_t *priv = filp->private_data;
89         drm_device_t *dev = priv->head->dev;
90         drm_map_t *map;
91         drm_map_t __user *argp = (void __user *)arg;
92         drm_map_list_t *list;
93
94         if ( !(filp->f_mode & 3) ) return -EACCES; /* Require read/write */
95
96         map = drm_alloc( sizeof(*map), DRM_MEM_MAPS );
97         if ( !map )
98                 return -ENOMEM;
99
100         if ( copy_from_user( map, argp, sizeof(*map) ) ) {
101                 drm_free( map, sizeof(*map), DRM_MEM_MAPS );
102                 return -EFAULT;
103         }
104
105         /* Only allow shared memory to be removable since we only keep enough
106          * bookkeeping information about shared memory to allow for removal
107          * when processes fork.
108          */
109         if ( (map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM ) {
110                 drm_free( map, sizeof(*map), DRM_MEM_MAPS );
111                 return -EINVAL;
112         }
113         DRM_DEBUG( "offset = 0x%08lx, size = 0x%08lx, type = %d\n",
114                    map->offset, map->size, map->type );
115         if ( (map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK)) ) {
116                 drm_free( map, sizeof(*map), DRM_MEM_MAPS );
117                 return -EINVAL;
118         }
119         map->mtrr   = -1;
120         map->handle = NULL;
121
122         switch ( map->type ) {
123         case _DRM_REGISTERS:
124         case _DRM_FRAME_BUFFER:
125 #if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__)
126                 if ( map->offset + map->size < map->offset ||
127                      map->offset < virt_to_phys(high_memory) ) {
128                         drm_free( map, sizeof(*map), DRM_MEM_MAPS );
129                         return -EINVAL;
130                 }
131 #endif
132 #ifdef __alpha__
133                 map->offset += dev->hose->mem_space->start;
134 #endif
135                 if (drm_core_has_MTRR(dev)) {
136                         if ( map->type == _DRM_FRAME_BUFFER ||
137                              (map->flags & _DRM_WRITE_COMBINING) ) {
138                                 map->mtrr = mtrr_add( map->offset, map->size,
139                                                       MTRR_TYPE_WRCOMB, 1 );
140                         }
141                 }
142                 if (map->type == _DRM_REGISTERS)
143                         map->handle = drm_ioremap( map->offset, map->size,
144                                                     dev );
145                 break;
146
147         case _DRM_SHM:
148                 map->handle = vmalloc_32(map->size);
149                 DRM_DEBUG( "%lu %d %p\n",
150                            map->size, drm_order( map->size ), map->handle );
151                 if ( !map->handle ) {
152                         drm_free( map, sizeof(*map), DRM_MEM_MAPS );
153                         return -ENOMEM;
154                 }
155                 map->offset = (unsigned long)map->handle;
156                 if ( map->flags & _DRM_CONTAINS_LOCK ) {
157                         /* Prevent a 2nd X Server from creating a 2nd lock */
158                         if (dev->lock.hw_lock != NULL) {
159                                 vfree( map->handle );
160                                 drm_free( map, sizeof(*map), DRM_MEM_MAPS );
161                                 return -EBUSY;
162                         }
163                         dev->sigdata.lock =
164                         dev->lock.hw_lock = map->handle; /* Pointer to lock */
165                 }
166                 break;
167         case _DRM_AGP:
168                 if (drm_core_has_AGP(dev)) {
169 #ifdef __alpha__
170                         map->offset += dev->hose->mem_space->start;
171 #endif
172                         map->offset += dev->agp->base;
173                         map->mtrr   = dev->agp->agp_mtrr; /* for getmap */
174                 }
175                 break;
176         case _DRM_SCATTER_GATHER:
177                 if (!dev->sg) {
178                         drm_free(map, sizeof(*map), DRM_MEM_MAPS);
179                         return -EINVAL;
180                 }
181                 map->offset += dev->sg->handle;
182                 break;
183         case _DRM_CONSISTENT: 
184         {
185                 /* dma_addr_t is 64-bit on i386 with CONFIG_HIGHMEM64G.
186                  * Since we limit the address to 2^32-1 (or less),
187                  * casting it down to 32 bits is no problem, but we
188                  * need to point to a 64-bit variable first. */
189                 dma_addr_t bus_addr;
190                 map->handle = drm_pci_alloc(dev, map->size, map->size,
191                                             0xffffffffUL, &bus_addr);
192                 map->offset = (unsigned long)bus_addr;
193                 if (!map->handle) {
194                         drm_free(map, sizeof(*map), DRM_MEM_MAPS);
195                         return -ENOMEM;
196                 }
197                 break;
198         }
199         default:
200                 drm_free( map, sizeof(*map), DRM_MEM_MAPS );
201                 return -EINVAL;
202         }
203
204         list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
205         if(!list) {
206                 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
207                 return -EINVAL;
208         }
209         memset(list, 0, sizeof(*list));
210         list->map = map;
211
212         down(&dev->struct_sem);
213         list_add(&list->head, &dev->maplist->head);
214 #ifdef CONFIG_COMPAT
215         /* Assign a 32-bit handle for _DRM_SHM mappings */
216         /* We do it here so that dev->struct_sem protects the increment */
217         if (map->type == _DRM_SHM)
218                 map->offset = map32_handle += PAGE_SIZE;
219 #endif
220         up(&dev->struct_sem);
221
222         if ( copy_to_user( argp, map, sizeof(*map) ) )
223                 return -EFAULT;
224         if (copy_to_user(&argp->handle, &map->offset, sizeof(map->offset)))
225                 return -EFAULT;
226         return 0;
227 }
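/*
 * Editorial usage sketch (an assumption, not part of the original file): user
 * space reaches this ioctl through libdrm's drmAddMap() or directly with
 * DRM_IOCTL_ADD_MAP.  Requesting one of the consistent-memory maps added by
 * this patch might look roughly like:
 *
 *     drm_map_t map;
 *     memset(&map, 0, sizeof(map));
 *     map.size = 4096;
 *     map.type = _DRM_CONSISTENT;
 *     ioctl(fd, DRM_IOCTL_ADD_MAP, &map);
 *
 * (map.size must be page aligned; 4096 is a placeholder.)  On success the
 * handle written back in map.handle carries the bus address returned by
 * drm_pci_alloc(), and it is this value that drm_rmmap() below matches
 * against request.handle when the mapping is removed.
 */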
228
229
230 /**
231  * Remove a map from the map list and deallocate its resources if the
232  * mapping isn't in use.
233  *
234  * \param inode device inode.
235  * \param filp file pointer.
236  * \param cmd command.
237  * \param arg pointer to a drm_map_t structure.
238  * \return zero on success or a negative value on error.
239  *
240  * Searches for the map on drm_device::maplist, removes it from the list,
241  * checks whether it is still being used, and frees any associated resources
242  * (such as MTRRs) if it is not.
243  *
244  * \sa drm_addmap().
245  */
246 int drm_rmmap(struct inode *inode, struct file *filp,
247                unsigned int cmd, unsigned long arg)
248 {
249         drm_file_t      *priv   = filp->private_data;
250         drm_device_t    *dev    = priv->head->dev;
251         struct list_head *list;
252         drm_map_list_t *r_list = NULL;
253         drm_vma_entry_t *pt, *prev;
254         drm_map_t *map;
255         drm_map_t request;
256         int found_maps = 0;
257
258         if (copy_from_user(&request, (drm_map_t __user *)arg,
259                            sizeof(request))) {
260                 return -EFAULT;
261         }
262
263         down(&dev->struct_sem);
264         list = &dev->maplist->head;
265         list_for_each(list, &dev->maplist->head) {
266                 r_list = list_entry(list, drm_map_list_t, head);
267
268                 if(r_list->map &&
269                    r_list->map->offset == (unsigned long) request.handle &&
270                    r_list->map->flags & _DRM_REMOVABLE) break;
271         }
272
273         /* List has wrapped around to the head pointer, or it's empty and we
274          * didn't find anything.
275          */
276         if(list == (&dev->maplist->head)) {
277                 up(&dev->struct_sem);
278                 return -EINVAL;
279         }
280         map = r_list->map;
281         list_del(list);
282         drm_free(list, sizeof(*list), DRM_MEM_MAPS);
283
284         for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
285                 if (pt->vma->vm_private_data == map) found_maps++;
286         }
287
288         if(!found_maps) {
289                 switch (map->type) {
290                 case _DRM_REGISTERS:
291                 case _DRM_FRAME_BUFFER:
292                   if (drm_core_has_MTRR(dev)) {
293                                 if (map->mtrr >= 0) {
294                                         int retcode;
295                                         retcode = mtrr_del(map->mtrr,
296                                                            map->offset,
297                                                            map->size);
298                                         DRM_DEBUG("mtrr_del = %d\n", retcode);
299                                 }
300                         }
301                         drm_ioremapfree(map->handle, map->size, dev);
302                         break;
303                 case _DRM_SHM:
304                         vfree(map->handle);
305                         break;
306                 case _DRM_AGP:
307                 case _DRM_SCATTER_GATHER:
308                         break;
309                 case _DRM_CONSISTENT:
310                         drm_pci_free(dev, map->size, map->handle, map->offset);
311                         break;
312                 }
313                 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
314         }
315         up(&dev->struct_sem);
316         return 0;
317 }
318
319 /**
320  * Cleanup after an error on one of the addbufs() functions.
321  *
322  * \param entry buffer entry where the error occurred.
323  *
324  * Frees any pages and buffers associated with the given entry.
325  */
326 static void drm_cleanup_buf_error(drm_device_t *dev, drm_buf_entry_t *entry)
327 {
328         int i;
329
330         if (entry->seg_count) {
331                 for (i = 0; i < entry->seg_count; i++) {
332                         if (entry->seglist[i]) {
333                                 drm_free_pages(entry->seglist[i],
334                                                 entry->page_order,
335                                                 DRM_MEM_DMA);
336                         }
337                 }
338                 drm_free(entry->seglist,
339                           entry->seg_count *
340                           sizeof(*entry->seglist),
341                           DRM_MEM_SEGS);
342
343                 entry->seg_count = 0;
344         }
345
346         if (entry->buf_count) {
347                 for (i = 0; i < entry->buf_count; i++) {
348                         if (entry->buflist[i].dev_private) {
349                                 drm_free(entry->buflist[i].dev_private,
350                                           entry->buflist[i].dev_priv_size,
351                                           DRM_MEM_BUFS);
352                         }
353                 }
354                 drm_free(entry->buflist,
355                           entry->buf_count *
356                           sizeof(*entry->buflist),
357                           DRM_MEM_BUFS);
358
359                 entry->buf_count = 0;
360         }
361 }
362
363 #if __OS_HAS_AGP
364 /**
365  * Add AGP buffers for DMA transfers (ioctl).
366  *
367  * \param inode device inode.
368  * \param filp file pointer.
369  * \param cmd command.
370  * \param arg pointer to a drm_buf_desc_t request.
371  * \return zero on success or a negative number on failure.
372  * 
373  * After some sanity checks, creates a drm_buf structure for each buffer and
374  * reallocates the buffer list of the same size order to accommodate the new
375  * buffers.
376  */
377 static int drm_addbufs_agp( struct inode *inode, struct file *filp,
378                             unsigned int cmd, unsigned long arg )
379 {
380         drm_file_t *priv = filp->private_data;
381         drm_device_t *dev = priv->head->dev;
382         drm_device_dma_t *dma = dev->dma;
383         drm_buf_desc_t request;
384         drm_buf_entry_t *entry;
385         drm_buf_t *buf;
386         unsigned long offset;
387         unsigned long agp_offset;
388         int count;
389         int order;
390         int size;
391         int alignment;
392         int page_order;
393         int total;
394         int byte_count;
395         int i;
396         drm_buf_t **temp_buflist;
397         drm_buf_desc_t __user *argp = (void __user *)arg;
398
399         if ( !dma ) return -EINVAL;
400
401         if ( copy_from_user( &request, argp,
402                              sizeof(request) ) )
403                 return -EFAULT;
404
405         count = request.count;
406         order = drm_order( request.size );
407         size = 1 << order;
408
409         alignment  = (request.flags & _DRM_PAGE_ALIGN)
410                 ? PAGE_ALIGN(size) : size;
411         page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
412         total = PAGE_SIZE << page_order;
413
414         byte_count = 0;
415         agp_offset = dev->agp->base + request.agp_start;
416
417         DRM_DEBUG( "count:      %d\n",  count );
418         DRM_DEBUG( "order:      %d\n",  order );
419         DRM_DEBUG( "size:       %d\n",  size );
420         DRM_DEBUG( "agp_offset: %lu\n", agp_offset );
421         DRM_DEBUG( "alignment:  %d\n",  alignment );
422         DRM_DEBUG( "page_order: %d\n",  page_order );
423         DRM_DEBUG( "total:      %d\n",  total );
424
425         if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
426         if ( dev->queue_count ) return -EBUSY; /* Not while in use */
427
428         spin_lock( &dev->count_lock );
429         if ( dev->buf_use ) {
430                 spin_unlock( &dev->count_lock );
431                 return -EBUSY;
432         }
433         atomic_inc( &dev->buf_alloc );
434         spin_unlock( &dev->count_lock );
435
436         down( &dev->struct_sem );
437         entry = &dma->bufs[order];
438         if ( entry->buf_count ) {
439                 up( &dev->struct_sem );
440                 atomic_dec( &dev->buf_alloc );
441                 return -ENOMEM; /* May only call once for each order */
442         }
443
444         if (count < 0 || count > 4096) {
445                 up( &dev->struct_sem );
446                 atomic_dec( &dev->buf_alloc );
447                 return -EINVAL;
448         }
449
450         entry->buflist = drm_alloc( count * sizeof(*entry->buflist),
451                                     DRM_MEM_BUFS );
452         if ( !entry->buflist ) {
453                 up( &dev->struct_sem );
454                 atomic_dec( &dev->buf_alloc );
455                 return -ENOMEM;
456         }
457         memset( entry->buflist, 0, count * sizeof(*entry->buflist) );
458
459         entry->buf_size = size;
460         entry->page_order = page_order;
461
462         offset = 0;
463
464         while ( entry->buf_count < count ) {
465                 buf          = &entry->buflist[entry->buf_count];
466                 buf->idx     = dma->buf_count + entry->buf_count;
467                 buf->total   = alignment;
468                 buf->order   = order;
469                 buf->used    = 0;
470
471                 buf->offset  = (dma->byte_count + offset);
472                 buf->bus_address = agp_offset + offset;
473                 buf->address = (void *)(agp_offset + offset);
474                 buf->next    = NULL;
475                 buf->waiting = 0;
476                 buf->pending = 0;
477                 init_waitqueue_head( &buf->dma_wait );
478                 buf->filp    = NULL;
479
480                 buf->dev_priv_size = dev->driver->dev_priv_size;
481                 buf->dev_private = drm_alloc( buf->dev_priv_size,
482                                                DRM_MEM_BUFS );
483                 if(!buf->dev_private) {
484                         /* Set count correctly so we free the proper amount. */
485                         entry->buf_count = count;
486                         drm_cleanup_buf_error(dev,entry);
487                         up( &dev->struct_sem );
488                         atomic_dec( &dev->buf_alloc );
489                         return -ENOMEM;
490                 }
491                 memset( buf->dev_private, 0, buf->dev_priv_size );
492
493                 DRM_DEBUG( "buffer %d @ %p\n",
494                            entry->buf_count, buf->address );
495
496                 offset += alignment;
497                 entry->buf_count++;
498                 byte_count += PAGE_SIZE << page_order;
499         }
500
501         DRM_DEBUG( "byte_count: %d\n", byte_count );
502
503         temp_buflist = drm_realloc( dma->buflist,
504                                      dma->buf_count * sizeof(*dma->buflist),
505                                      (dma->buf_count + entry->buf_count)
506                                      * sizeof(*dma->buflist),
507                                      DRM_MEM_BUFS );
508         if(!temp_buflist) {
509                 /* Free the entry because it isn't valid */
510                 drm_cleanup_buf_error(dev,entry);
511                 up( &dev->struct_sem );
512                 atomic_dec( &dev->buf_alloc );
513                 return -ENOMEM;
514         }
515         dma->buflist = temp_buflist;
516
517         for ( i = 0 ; i < entry->buf_count ; i++ ) {
518                 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
519         }
520
521         dma->buf_count += entry->buf_count;
522         dma->byte_count += byte_count;
523
524         DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
525         DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );
526
527         up( &dev->struct_sem );
528
529         request.count = entry->buf_count;
530         request.size = size;
531
532         if ( copy_to_user( argp, &request, sizeof(request) ) )
533                 return -EFAULT;
534
535         dma->flags = _DRM_DMA_USE_AGP;
536
537         atomic_dec( &dev->buf_alloc );
538         return 0;
539 }
540 #endif /* __OS_HAS_AGP */
541
542 static int drm_addbufs_pci( struct inode *inode, struct file *filp,
543                             unsigned int cmd, unsigned long arg )
544 {
545         drm_file_t *priv = filp->private_data;
546         drm_device_t *dev = priv->head->dev;
547         drm_device_dma_t *dma = dev->dma;
548         drm_buf_desc_t request;
549         int count;
550         int order;
551         int size;
552         int total;
553         int page_order;
554         drm_buf_entry_t *entry;
555         unsigned long page;
556         drm_buf_t *buf;
557         int alignment;
558         unsigned long offset;
559         int i;
560         int byte_count;
561         int page_count;
562         unsigned long *temp_pagelist;
563         drm_buf_t **temp_buflist;
564         drm_buf_desc_t __user *argp = (void __user *)arg;
565
566         if (!drm_core_check_feature(dev, DRIVER_PCI_DMA)) return -EINVAL;
567         if ( !dma ) return -EINVAL;
568
569         if ( copy_from_user( &request, argp, sizeof(request) ) )
570                 return -EFAULT;
571
572         count = request.count;
573         order = drm_order( request.size );
574         size = 1 << order;
575
576         DRM_DEBUG( "count=%d, size=%d (%d), order=%d, queue_count=%d\n",
577                    request.count, request.size, size,
578                    order, dev->queue_count );
579
580         if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
581         if ( dev->queue_count ) return -EBUSY; /* Not while in use */
582
583         alignment = (request.flags & _DRM_PAGE_ALIGN)
584                 ? PAGE_ALIGN(size) : size;
585         page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
586         total = PAGE_SIZE << page_order;
587
588         spin_lock( &dev->count_lock );
589         if ( dev->buf_use ) {
590                 spin_unlock( &dev->count_lock );
591                 return -EBUSY;
592         }
593         atomic_inc( &dev->buf_alloc );
594         spin_unlock( &dev->count_lock );
595
596         down( &dev->struct_sem );
597         entry = &dma->bufs[order];
598         if ( entry->buf_count ) {
599                 up( &dev->struct_sem );
600                 atomic_dec( &dev->buf_alloc );
601                 return -ENOMEM; /* May only call once for each order */
602         }
603
604         if (count < 0 || count > 4096) {
605                 up( &dev->struct_sem );
606                 atomic_dec( &dev->buf_alloc );
607                 return -EINVAL;
608         }
609
610         entry->buflist = drm_alloc( count * sizeof(*entry->buflist),
611                                     DRM_MEM_BUFS );
612         if ( !entry->buflist ) {
613                 up( &dev->struct_sem );
614                 atomic_dec( &dev->buf_alloc );
615                 return -ENOMEM;
616         }
617         memset( entry->buflist, 0, count * sizeof(*entry->buflist) );
618
619         entry->seglist = drm_alloc( count * sizeof(*entry->seglist),
620                                     DRM_MEM_SEGS );
621         if ( !entry->seglist ) {
622                 drm_free( entry->buflist,
623                           count * sizeof(*entry->buflist),
624                           DRM_MEM_BUFS );
625                 up( &dev->struct_sem );
626                 atomic_dec( &dev->buf_alloc );
627                 return -ENOMEM;
628         }
629         memset( entry->seglist, 0, count * sizeof(*entry->seglist) );
630
631         /* Keep the original pagelist until we know all the allocations
632          * have succeeded
633          */
634         temp_pagelist = drm_alloc( (dma->page_count + (count << page_order))
635                                     * sizeof(*dma->pagelist),
636                                     DRM_MEM_PAGES );
637         if (!temp_pagelist) {
638                 drm_free( entry->buflist,
639                            count * sizeof(*entry->buflist),
640                            DRM_MEM_BUFS );
641                 drm_free( entry->seglist,
642                            count * sizeof(*entry->seglist),
643                            DRM_MEM_SEGS );
644                 up( &dev->struct_sem );
645                 atomic_dec( &dev->buf_alloc );
646                 return -ENOMEM;
647         }
648         memcpy(temp_pagelist,
649                dma->pagelist,
650                dma->page_count * sizeof(*dma->pagelist));
651         DRM_DEBUG( "pagelist: %d entries\n",
652                    dma->page_count + (count << page_order) );
653
654         entry->buf_size = size;
655         entry->page_order = page_order;
656         byte_count = 0;
657         page_count = 0;
658
659         while ( entry->buf_count < count ) {
660                 page = drm_alloc_pages( page_order, DRM_MEM_DMA );
661                 if ( !page ) {
662                         /* Set count correctly so we free the proper amount. */
663                         entry->buf_count = count;
664                         entry->seg_count = count;
665                         drm_cleanup_buf_error(dev, entry);
666                         drm_free( temp_pagelist,
667                                    (dma->page_count + (count << page_order))
668                                    * sizeof(*dma->pagelist),
669                                    DRM_MEM_PAGES );
670                         up( &dev->struct_sem );
671                         atomic_dec( &dev->buf_alloc );
672                         return -ENOMEM;
673                 }
674                 entry->seglist[entry->seg_count++] = page;
675                 for ( i = 0 ; i < (1 << page_order) ; i++ ) {
676                         DRM_DEBUG( "page %d @ 0x%08lx\n",
677                                    dma->page_count + page_count,
678                                    page + PAGE_SIZE * i );
679                         temp_pagelist[dma->page_count + page_count++]
680                                 = page + PAGE_SIZE * i;
681                 }
682                 for ( offset = 0 ;
683                       offset + size <= total && entry->buf_count < count ;
684                       offset += alignment, ++entry->buf_count ) {
685                         buf          = &entry->buflist[entry->buf_count];
686                         buf->idx     = dma->buf_count + entry->buf_count;
687                         buf->total   = alignment;
688                         buf->order   = order;
689                         buf->used    = 0;
690                         buf->offset  = (dma->byte_count + byte_count + offset);
691                         buf->address = (void *)(page + offset);
692                         buf->next    = NULL;
693                         buf->waiting = 0;
694                         buf->pending = 0;
695                         init_waitqueue_head( &buf->dma_wait );
696                         buf->filp    = NULL;
697
698                         buf->dev_priv_size = dev->driver->dev_priv_size;
699                         buf->dev_private = drm_alloc( buf->dev_priv_size,
700                                                        DRM_MEM_BUFS );
701                         if(!buf->dev_private) {
702                                 /* Set count correctly so we free the proper amount. */
703                                 entry->buf_count = count;
704                                 entry->seg_count = count;
705                                 drm_cleanup_buf_error(dev,entry);
706                                 drm_free( temp_pagelist,
707                                            (dma->page_count + (count << page_order))
708                                            * sizeof(*dma->pagelist),
709                                            DRM_MEM_PAGES );
710                                 up( &dev->struct_sem );
711                                 atomic_dec( &dev->buf_alloc );
712                                 return -ENOMEM;
713                         }
714                         memset( buf->dev_private, 0, buf->dev_priv_size );
715
716                         DRM_DEBUG( "buffer %d @ %p\n",
717                                    entry->buf_count, buf->address );
718                 }
719                 byte_count += PAGE_SIZE << page_order;
720         }
721
722         temp_buflist = drm_realloc( dma->buflist,
723                                      dma->buf_count * sizeof(*dma->buflist),
724                                      (dma->buf_count + entry->buf_count)
725                                      * sizeof(*dma->buflist),
726                                      DRM_MEM_BUFS );
727         if (!temp_buflist) {
728                 /* Free the entry because it isn't valid */
729                 drm_cleanup_buf_error(dev,entry);
730                 drm_free( temp_pagelist,
731                            (dma->page_count + (count << page_order))
732                            * sizeof(*dma->pagelist),
733                            DRM_MEM_PAGES );
734                 up( &dev->struct_sem );
735                 atomic_dec( &dev->buf_alloc );
736                 return -ENOMEM;
737         }
738         dma->buflist = temp_buflist;
739
740         for ( i = 0 ; i < entry->buf_count ; i++ ) {
741                 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
742         }
743
744         /* No allocations failed, so now we can replace the original pagelist
745          * with the new one.
746          */
747         if (dma->page_count) {
748                 drm_free(dma->pagelist,
749                           dma->page_count * sizeof(*dma->pagelist),
750                           DRM_MEM_PAGES);
751         }
752         dma->pagelist = temp_pagelist;
753
754         dma->buf_count += entry->buf_count;
755         dma->seg_count += entry->seg_count;
756         dma->page_count += entry->seg_count << page_order;
757         dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
758
759         up( &dev->struct_sem );
760
761         request.count = entry->buf_count;
762         request.size = size;
763
764         if ( copy_to_user( argp, &request, sizeof(request) ) )
765                 return -EFAULT;
766
767         atomic_dec( &dev->buf_alloc );
768         return 0;
769
770 }
771
772 static int drm_addbufs_sg( struct inode *inode, struct file *filp,
773                            unsigned int cmd, unsigned long arg )
774 {
775         drm_file_t *priv = filp->private_data;
776         drm_device_t *dev = priv->head->dev;
777         drm_device_dma_t *dma = dev->dma;
778         drm_buf_desc_t __user *argp = (void __user *)arg;
779         drm_buf_desc_t request;
780         drm_buf_entry_t *entry;
781         drm_buf_t *buf;
782         unsigned long offset;
783         unsigned long agp_offset;
784         int count;
785         int order;
786         int size;
787         int alignment;
788         int page_order;
789         int total;
790         int byte_count;
791         int i;
792         drm_buf_t **temp_buflist;
793
794         if (!drm_core_check_feature(dev, DRIVER_SG)) return -EINVAL;
795         
796         if ( !dma ) return -EINVAL;
797
798         if ( copy_from_user( &request, argp, sizeof(request) ) )
799                 return -EFAULT;
800
801         count = request.count;
802         order = drm_order( request.size );
803         size = 1 << order;
804
805         alignment  = (request.flags & _DRM_PAGE_ALIGN)
806                         ? PAGE_ALIGN(size) : size;
807         page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
808         total = PAGE_SIZE << page_order;
809
810         byte_count = 0;
811         agp_offset = request.agp_start;
812
813         DRM_DEBUG( "count:      %d\n",  count );
814         DRM_DEBUG( "order:      %d\n",  order );
815         DRM_DEBUG( "size:       %d\n",  size );
816         DRM_DEBUG( "agp_offset: %lu\n", agp_offset );
817         DRM_DEBUG( "alignment:  %d\n",  alignment );
818         DRM_DEBUG( "page_order: %d\n",  page_order );
819         DRM_DEBUG( "total:      %d\n",  total );
820
821         if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
822         if ( dev->queue_count ) return -EBUSY; /* Not while in use */
823
824         spin_lock( &dev->count_lock );
825         if ( dev->buf_use ) {
826                 spin_unlock( &dev->count_lock );
827                 return -EBUSY;
828         }
829         atomic_inc( &dev->buf_alloc );
830         spin_unlock( &dev->count_lock );
831
832         down( &dev->struct_sem );
833         entry = &dma->bufs[order];
834         if ( entry->buf_count ) {
835                 up( &dev->struct_sem );
836                 atomic_dec( &dev->buf_alloc );
837                 return -ENOMEM; /* May only call once for each order */
838         }
839
840         if (count < 0 || count > 4096) {
841                 up( &dev->struct_sem );
842                 atomic_dec( &dev->buf_alloc );
843                 return -EINVAL;
844         }
845
846         entry->buflist = drm_alloc( count * sizeof(*entry->buflist),
847                                      DRM_MEM_BUFS );
848         if ( !entry->buflist ) {
849                 up( &dev->struct_sem );
850                 atomic_dec( &dev->buf_alloc );
851                 return -ENOMEM;
852         }
853         memset( entry->buflist, 0, count * sizeof(*entry->buflist) );
854
855         entry->buf_size = size;
856         entry->page_order = page_order;
857
858         offset = 0;
859
860         while ( entry->buf_count < count ) {
861                 buf          = &entry->buflist[entry->buf_count];
862                 buf->idx     = dma->buf_count + entry->buf_count;
863                 buf->total   = alignment;
864                 buf->order   = order;
865                 buf->used    = 0;
866
867                 buf->offset  = (dma->byte_count + offset);
868                 buf->bus_address = agp_offset + offset;
869                 buf->address = (void *)(agp_offset + offset + dev->sg->handle);
870                 buf->next    = NULL;
871                 buf->waiting = 0;
872                 buf->pending = 0;
873                 init_waitqueue_head( &buf->dma_wait );
874                 buf->filp    = NULL;
875
876                 buf->dev_priv_size = dev->driver->dev_priv_size;
877                 buf->dev_private = drm_alloc( buf->dev_priv_size,
878                                                DRM_MEM_BUFS );
879                 if(!buf->dev_private) {
880                         /* Set count correctly so we free the proper amount. */
881                         entry->buf_count = count;
882                         drm_cleanup_buf_error(dev,entry);
883                         up( &dev->struct_sem );
884                         atomic_dec( &dev->buf_alloc );
885                         return -ENOMEM;
886                 }
887
888                 memset( buf->dev_private, 0, buf->dev_priv_size );
889
890                 DRM_DEBUG( "buffer %d @ %p\n",
891                            entry->buf_count, buf->address );
892
893                 offset += alignment;
894                 entry->buf_count++;
895                 byte_count += PAGE_SIZE << page_order;
896         }
897
898         DRM_DEBUG( "byte_count: %d\n", byte_count );
899
900         temp_buflist = drm_realloc( dma->buflist,
901                                      dma->buf_count * sizeof(*dma->buflist),
902                                      (dma->buf_count + entry->buf_count)
903                                      * sizeof(*dma->buflist),
904                                      DRM_MEM_BUFS );
905         if(!temp_buflist) {
906                 /* Free the entry because it isn't valid */
907                 drm_cleanup_buf_error(dev,entry);
908                 up( &dev->struct_sem );
909                 atomic_dec( &dev->buf_alloc );
910                 return -ENOMEM;
911         }
912         dma->buflist = temp_buflist;
913
914         for ( i = 0 ; i < entry->buf_count ; i++ ) {
915                 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
916         }
917
918         dma->buf_count += entry->buf_count;
919         dma->byte_count += byte_count;
920
921         DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
922         DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );
923
924         up( &dev->struct_sem );
925
926         request.count = entry->buf_count;
927         request.size = size;
928
929         if ( copy_to_user( argp, &request, sizeof(request) ) )
930                 return -EFAULT;
931
932         dma->flags = _DRM_DMA_USE_SG;
933
934         atomic_dec( &dev->buf_alloc );
935         return 0;
936 }
937
938 /**
939  * Add buffers for DMA transfers (ioctl).
940  *
941  * \param inode device inode.
942  * \param filp file pointer.
943  * \param cmd command.
944  * \param arg pointer to a drm_buf_desc_t request.
945  * \return zero on success or a negative number on failure.
946  *
947  * According to the memory type specified in drm_buf_desc::flags and the
948  * build options, it dispatches the call either to addbufs_agp(),
949  * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
950  * PCI memory respectively.
951  */
952 int drm_addbufs( struct inode *inode, struct file *filp,
953                   unsigned int cmd, unsigned long arg )
954 {
955         drm_buf_desc_t request;
956         drm_file_t *priv = filp->private_data;
957         drm_device_t *dev = priv->head->dev;
958         
959         if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
960                 return -EINVAL;
961
962         if ( copy_from_user( &request, (drm_buf_desc_t __user *)arg,
963                              sizeof(request) ) )
964                 return -EFAULT;
965
966 #if __OS_HAS_AGP
967         if ( request.flags & _DRM_AGP_BUFFER )
968                 return drm_addbufs_agp( inode, filp, cmd, arg );
969         else
970 #endif
971         if ( request.flags & _DRM_SG_BUFFER )
972                 return drm_addbufs_sg( inode, filp, cmd, arg );
973         else
974                 return drm_addbufs_pci( inode, filp, cmd, arg );
975 }
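/*
 * Editorial usage sketch (an assumption, not part of the original file): the
 * flag bits in the drm_buf_desc_t request select which helper above services
 * the call.  A client asking for 32 page-aligned PCI DMA buffers of 64 KiB
 * might do roughly:
 *
 *     drm_buf_desc_t req;
 *     memset(&req, 0, sizeof(req));
 *     req.count = 32;
 *     req.size  = 65536;
 *     req.flags = _DRM_PAGE_ALIGN;
 *     ioctl(fd, DRM_IOCTL_ADD_BUFS, &req);
 *
 * Setting _DRM_AGP_BUFFER (together with agp_start) or _DRM_SG_BUFFER instead
 * routes the request to drm_addbufs_agp() or drm_addbufs_sg(); on return
 * req.count and req.size report what was actually allocated.
 */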
976
977
978 /**
979  * Get information about the buffer mappings.
980  *
981  * This was originally meant for debugging purposes, or for use by a
982  * sophisticated client library to determine how best to use the available
983  * buffers (e.g., large buffers can be used for image transfer).
984  *
985  * \param inode device inode.
986  * \param filp file pointer.
987  * \param cmd command.
988  * \param arg pointer to a drm_buf_info structure.
989  * \return zero on success or a negative number on failure.
990  *
991  * Increments drm_device::buf_use while holding the drm_device::count_lock
992  * lock, preventing allocation of more buffers after this call. Information
993  * about each requested buffer is then copied into user space.
994  */
995 int drm_infobufs( struct inode *inode, struct file *filp,
996                    unsigned int cmd, unsigned long arg )
997 {
998         drm_file_t *priv = filp->private_data;
999         drm_device_t *dev = priv->head->dev;
1000         drm_device_dma_t *dma = dev->dma;
1001         drm_buf_info_t request;
1002         drm_buf_info_t __user *argp = (void __user *)arg;
1003         int i;
1004         int count;
1005
1006         if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1007                 return -EINVAL;
1008
1009         if ( !dma ) return -EINVAL;
1010
1011         spin_lock( &dev->count_lock );
1012         if ( atomic_read( &dev->buf_alloc ) ) {
1013                 spin_unlock( &dev->count_lock );
1014                 return -EBUSY;
1015         }
1016         ++dev->buf_use;         /* Can't allocate more after this call */
1017         spin_unlock( &dev->count_lock );
1018
1019         if ( copy_from_user( &request, argp, sizeof(request) ) )
1020                 return -EFAULT;
1021
1022         for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
1023                 if ( dma->bufs[i].buf_count ) ++count;
1024         }
1025
1026         DRM_DEBUG( "count = %d\n", count );
1027
1028         if ( request.count >= count ) {
1029                 for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
1030                         if ( dma->bufs[i].buf_count ) {
1031                                 drm_buf_desc_t __user *to = &request.list[count];
1032                                 drm_buf_entry_t *from = &dma->bufs[i];
1033                                 drm_freelist_t *list = &dma->bufs[i].freelist;
1034                                 if ( copy_to_user( &to->count,
1035                                                    &from->buf_count,
1036                                                    sizeof(from->buf_count) ) ||
1037                                      copy_to_user( &to->size,
1038                                                    &from->buf_size,
1039                                                    sizeof(from->buf_size) ) ||
1040                                      copy_to_user( &to->low_mark,
1041                                                    &list->low_mark,
1042                                                    sizeof(list->low_mark) ) ||
1043                                      copy_to_user( &to->high_mark,
1044                                                    &list->high_mark,
1045                                                    sizeof(list->high_mark) ) )
1046                                         return -EFAULT;
1047
1048                                 DRM_DEBUG( "%d %d %d %d %d\n",
1049                                            i,
1050                                            dma->bufs[i].buf_count,
1051                                            dma->bufs[i].buf_size,
1052                                            dma->bufs[i].freelist.low_mark,
1053                                            dma->bufs[i].freelist.high_mark );
1054                                 ++count;
1055                         }
1056                 }
1057         }
1058         request.count = count;
1059
1060         if ( copy_to_user( argp, &request, sizeof(request) ) )
1061                 return -EFAULT;
1062
1063         return 0;
1064 }
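/*
 * Editorial usage sketch (an assumption, not part of the original file): the
 * usual pattern is two passes -- a first call with count set to 0 to learn
 * how many size buckets are populated, then a second call with a list large
 * enough to receive them:
 *
 *     drm_buf_info_t info;
 *     memset(&info, 0, sizeof(info));
 *     ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);
 *     info.list = calloc(info.count, sizeof(*info.list));
 *     ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);
 *
 * libdrm's drmGetBufInfo() wraps this sequence for clients.
 */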
1065
1066 /**
1067  * Specifies a low and high water mark for buffer allocation.
1068  *
1069  * \param inode device inode.
1070  * \param filp file pointer.
1071  * \param cmd command.
1072  * \param arg a pointer to a drm_buf_desc structure.
1073  * \return zero on success or a negative number on failure.
1074  *
1075  * Verifies that the size order lies within the admissible range and updates
1076  * the low and high water marks of the respective drm_device_dma::bufs entry.
1077  *
1078  * \note This ioctl is deprecated and rarely, if ever, used.
1079  */
1080 int drm_markbufs( struct inode *inode, struct file *filp,
1081                    unsigned int cmd, unsigned long arg )
1082 {
1083         drm_file_t *priv = filp->private_data;
1084         drm_device_t *dev = priv->head->dev;
1085         drm_device_dma_t *dma = dev->dma;
1086         drm_buf_desc_t request;
1087         int order;
1088         drm_buf_entry_t *entry;
1089
1090         if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1091                 return -EINVAL;
1092
1093         if ( !dma ) return -EINVAL;
1094
1095         if ( copy_from_user( &request,
1096                              (drm_buf_desc_t __user *)arg,
1097                              sizeof(request) ) )
1098                 return -EFAULT;
1099
1100         DRM_DEBUG( "%d, %d, %d\n",
1101                    request.size, request.low_mark, request.high_mark );
1102         order = drm_order( request.size );
1103         if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
1104         entry = &dma->bufs[order];
1105
1106         if ( request.low_mark < 0 || request.low_mark > entry->buf_count )
1107                 return -EINVAL;
1108         if ( request.high_mark < 0 || request.high_mark > entry->buf_count )
1109                 return -EINVAL;
1110
1111         entry->freelist.low_mark  = request.low_mark;
1112         entry->freelist.high_mark = request.high_mark;
1113
1114         return 0;
1115 }
1116
1117 /**
1118  * Unreserve the buffers in the list, previously reserved using drmDMA.
1119  *
1120  * \param inode device inode.
1121  * \param filp file pointer.
1122  * \param cmd command.
1123  * \param arg pointer to a drm_buf_free structure.
1124  * \return zero on success or a negative number on failure.
1125  * 
1126  * Calls free_buffer() for each used buffer.
1127  * This function is primarily used for debugging.
1128  */
1129 int drm_freebufs( struct inode *inode, struct file *filp,
1130                    unsigned int cmd, unsigned long arg )
1131 {
1132         drm_file_t *priv = filp->private_data;
1133         drm_device_t *dev = priv->head->dev;
1134         drm_device_dma_t *dma = dev->dma;
1135         drm_buf_free_t request;
1136         int i;
1137         int idx;
1138         drm_buf_t *buf;
1139
1140         if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1141                 return -EINVAL;
1142
1143         if ( !dma ) return -EINVAL;
1144
1145         if ( copy_from_user( &request,
1146                              (drm_buf_free_t __user *)arg,
1147                              sizeof(request) ) )
1148                 return -EFAULT;
1149
1150         DRM_DEBUG( "%d\n", request.count );
1151         for ( i = 0 ; i < request.count ; i++ ) {
1152                 if ( copy_from_user( &idx,
1153                                      &request.list[i],
1154                                      sizeof(idx) ) )
1155                         return -EFAULT;
1156                 if ( idx < 0 || idx >= dma->buf_count ) {
1157                         DRM_ERROR( "Index %d (of %d max)\n",
1158                                    idx, dma->buf_count - 1 );
1159                         return -EINVAL;
1160                 }
1161                 buf = dma->buflist[idx];
1162                 if ( buf->filp != filp ) {
1163                         DRM_ERROR( "Process %d freeing buffer not owned\n",
1164                                    current->pid );
1165                         return -EINVAL;
1166                 }
1167                 drm_free_buffer( dev, buf );
1168         }
1169
1170         return 0;
1171 }
1172
1173 /**
1174  * Maps all of the DMA buffers into client-virtual space (ioctl).
1175  *
1176  * \param inode device inode.
1177  * \param filp file pointer.
1178  * \param cmd command.
1179  * \param arg pointer to a drm_buf_map structure.
1180  * \return zero on success or a negative number on failure.
1181  *
1182  * Maps the AGP or SG buffer region with do_mmap(), and copies information
1183  * about each buffer into user space. The PCI buffers are already mapped on the
1184  * addbufs_pci() call.
1185  */
1186 int drm_mapbufs( struct inode *inode, struct file *filp,
1187                   unsigned int cmd, unsigned long arg )
1188 {
1189         drm_file_t *priv = filp->private_data;
1190         drm_device_t *dev = priv->head->dev;
1191         drm_device_dma_t *dma = dev->dma;
1192         drm_buf_map_t __user *argp = (void __user *)arg;
1193         int retcode = 0;
1194         const int zero = 0;
1195         unsigned long virtual;
1196         unsigned long address;
1197         drm_buf_map_t request;
1198         int i;
1199
1200         if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1201                 return -EINVAL;
1202
1203         if ( !dma ) return -EINVAL;
1204
1205         spin_lock( &dev->count_lock );
1206         if ( atomic_read( &dev->buf_alloc ) ) {
1207                 spin_unlock( &dev->count_lock );
1208                 return -EBUSY;
1209         }
1210         dev->buf_use++;         /* Can't allocate more after this call */
1211         spin_unlock( &dev->count_lock );
1212
1213         if ( copy_from_user( &request, argp, sizeof(request) ) )
1214                 return -EFAULT;
1215
1216         if ( request.count >= dma->buf_count ) {
1217                 if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) ||
1218                     (drm_core_check_feature(dev, DRIVER_SG) && (dma->flags & _DRM_DMA_USE_SG)) ) {
1219                         drm_map_t *map = dev->agp_buffer_map;
1220
1221                         if ( !map ) {
1222                                 retcode = -EINVAL;
1223                                 goto done;
1224                         }
1225
1226 #if LINUX_VERSION_CODE <= 0x020402
1227                         down( &current->mm->mmap_sem );
1228 #else
1229                         down_write( &current->mm->mmap_sem );
1230 #endif
1231                         virtual = do_mmap( filp, 0, map->size,
1232                                            PROT_READ | PROT_WRITE,
1233                                            MAP_SHARED,
1234                                            (unsigned long)map->offset );
1235 #if LINUX_VERSION_CODE <= 0x020402
1236                         up( &current->mm->mmap_sem );
1237 #else
1238                         up_write( &current->mm->mmap_sem );
1239 #endif
1240                 } else {
1241 #if LINUX_VERSION_CODE <= 0x020402
1242                         down( &current->mm->mmap_sem );
1243 #else
1244                         down_write( &current->mm->mmap_sem );
1245 #endif
1246                         virtual = do_mmap( filp, 0, dma->byte_count,
1247                                            PROT_READ | PROT_WRITE,
1248                                            MAP_SHARED, 0 );
1249 #if LINUX_VERSION_CODE <= 0x020402
1250                         up( &current->mm->mmap_sem );
1251 #else
1252                         up_write( &current->mm->mmap_sem );
1253 #endif
1254                 }
1255                 if ( virtual > -1024UL ) {
1256                         /* Real error */
1257                         retcode = (signed long)virtual;
1258                         goto done;
1259                 }
1260                 request.virtual = (void __user *)virtual;
1261
1262                 for ( i = 0 ; i < dma->buf_count ; i++ ) {
1263                         if ( copy_to_user( &request.list[i].idx,
1264                                            &dma->buflist[i]->idx,
1265                                            sizeof(request.list[0].idx) ) ) {
1266                                 retcode = -EFAULT;
1267                                 goto done;
1268                         }
1269                         if ( copy_to_user( &request.list[i].total,
1270                                            &dma->buflist[i]->total,
1271                                            sizeof(request.list[0].total) ) ) {
1272                                 retcode = -EFAULT;
1273                                 goto done;
1274                         }
1275                         if ( copy_to_user( &request.list[i].used,
1276                                            &zero,
1277                                            sizeof(zero) ) ) {
1278                                 retcode = -EFAULT;
1279                                 goto done;
1280                         }
1281                         address = virtual + dma->buflist[i]->offset; /* *** */
1282                         if ( copy_to_user( &request.list[i].address,
1283                                            &address,
1284                                            sizeof(address) ) ) {
1285                                 retcode = -EFAULT;
1286                                 goto done;
1287                         }
1288                 }
1289         }
1290  done:
1291         request.count = dma->buf_count;
1292         DRM_DEBUG( "%d buffers, retcode = %d\n", request.count, retcode );
1293
1294         if ( copy_to_user( argp, &request, sizeof(request) ) )
1295                 return -EFAULT;
1296
1297         return retcode;
1298 }
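/*
 * Editorial usage sketch (an assumption, not part of the original file): user
 * space normally calls this through libdrm's drmMapBufs(), or directly as:
 *
 *     drm_buf_map_t req;
 *     memset(&req, 0, sizeof(req));
 *     req.count = total_bufs;
 *     req.list  = calloc(total_bufs, sizeof(*req.list));
 *     ioctl(fd, DRM_IOCTL_MAP_BUFS, &req);
 *
 * total_bufs is a placeholder for the buffer count learned from the buf-info
 * ioctl; it must be at least dma->buf_count or the kernel skips the mapping
 * and only reports the count back.  On success each req.list[i].address
 * points into the region that was mmap()ed on the client's behalf above.
 */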
1299