include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit...
[linux-3.10.git] / arch / xtensa / kernel / pci-dma.c
1 /*
2  * arch/xtensa/kernel/pci-dma.c
3  *
4  * DMA coherent memory allocation.
5  *
6  * This program is free software; you can redistribute  it and/or modify it
7  * under  the terms of  the GNU General  Public License as published by the
8  * Free Software Foundation;  either version 2 of the  License, or (at your
9  * option) any later version.
10  *
11  * Copyright (C) 2002 - 2005 Tensilica Inc.
12  *
13  * Based on version for i386.
14  *
15  * Chris Zankel <chris@zankel.net>
16  * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
17  */
18
19 #include <linux/types.h>
20 #include <linux/mm.h>
21 #include <linux/string.h>
22 #include <linux/pci.h>
23 #include <linux/gfp.h>
24 #include <asm/io.h>
25 #include <asm/cacheflush.h>
26
27 /*
28  * Note: We assume that the full memory space is always mapped to 'kseg'
29  *       Otherwise we have to use page attributes (not implemented).
30  */
31
32 void *
33 dma_alloc_coherent(struct device *dev,size_t size,dma_addr_t *handle,gfp_t flag)
34 {
35         unsigned long ret;
36         unsigned long uncached = 0;
37
38         /* ignore region speicifiers */
39
40         flag &= ~(__GFP_DMA | __GFP_HIGHMEM);
41
42         if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
43                 flag |= GFP_DMA;
44         ret = (unsigned long)__get_free_pages(flag, get_order(size));
45
46         if (ret == 0)
47                 return NULL;
48
49         /* We currently don't support coherent memory outside KSEG */
50
51         if (ret < XCHAL_KSEG_CACHED_VADDR
52             || ret >= XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE)
53                 BUG();
54
55
56         if (ret != 0) {
57                 memset((void*) ret, 0, size);
58                 uncached = ret+XCHAL_KSEG_BYPASS_VADDR-XCHAL_KSEG_CACHED_VADDR;
59                 *handle = virt_to_bus((void*)ret);
60                 __flush_invalidate_dcache_range(ret, size);
61         }
62
63         return (void*)uncached;
64 }
65
66 void dma_free_coherent(struct device *hwdev, size_t size,
67                          void *vaddr, dma_addr_t dma_handle)
68 {
69         long addr=(long)vaddr+XCHAL_KSEG_CACHED_VADDR-XCHAL_KSEG_BYPASS_VADDR;
70
71         if (addr < 0 || addr >= XCHAL_KSEG_SIZE)
72                 BUG();
73
74         free_pages(addr, get_order(size));
75 }
76
77
/*
 * Synchronize a memory range between the CPU data cache and memory for
 * a streaming DMA transfer in the given PCI_DMA_* direction.
 *
 * vaddr:     kernel virtual start address of the buffer
 * size:      length of the range in bytes
 * direction: PCI_DMA_{FROMDEVICE,TODEVICE,BIDIRECTIONAL}; PCI_DMA_NONE
 *            is a caller bug and hits BUG()
 *
 * NOTE(review): the second argument of the __*_dcache_range()
 * primitives is passed as the byte count here — confirm against the
 * prototypes in <asm/cacheflush.h> (start/size vs. start/end).
 */
void consistent_sync(void *vaddr, size_t size, int direction)
{
	switch (direction) {
	case PCI_DMA_NONE:
		/* BUG() does not return; the textual fall-through below
		 * is unreachable (assuming CONFIG_BUG is enabled). */
		BUG();
	case PCI_DMA_FROMDEVICE:        /* invalidate only */
		__invalidate_dcache_range((unsigned long)vaddr,
					  (unsigned long)size);
		break;

	case PCI_DMA_TODEVICE:          /* writeback only */
	case PCI_DMA_BIDIRECTIONAL:     /* writeback and invalidate */
		__flush_invalidate_dcache_range((unsigned long)vaddr,
						(unsigned long)size);
		break;
	}
}