fbab0362c081134cecf55793a6f9b31f8a132921
[linux-2.6.git] / arch / arm / mach-tegra / iovmm-gart.c
1 /*
2  * arch/arm/mach-tegra/iovmm-gart.c
3  *
4  * Tegra I/O VMM implementation for GART devices in Tegra and Tegra 2 series
5  * systems-on-a-chip.
6  *
7  * Copyright (c) 2010, NVIDIA Corporation.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; either version 2 of the License, or
12  * (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful, but WITHOUT
15  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
17  * more details.
18  *
19  * You should have received a copy of the GNU General Public License along
20  * with this program; if not, write to the Free Software Foundation, Inc.,
21  * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
22  */
23
24 #include <linux/module.h>
25 #include <linux/platform_device.h>
26 #include <linux/spinlock.h>
27 #include <linux/slab.h>
28 #include <linux/vmalloc.h>
29 #include <linux/mm.h>
30 #include <asm/io.h>
31 #include <asm/cacheflush.h>
32
33 #include <mach/iovmm.h>
34
35 #if defined(CONFIG_ARCH_TEGRA_2x_SOC)
36 #define GART_CONFIG             0x24
37 #define GART_ENTRY_ADDR         0x28
38 #define GART_ENTRY_DATA         0x2c
39 #endif
40
41 #define VMM_NAME "iovmm-gart"
42 #define DRIVER_NAME "tegra_gart"
43
44 #define GART_PAGE_SHIFT (12)
45 #define GART_PAGE_MASK (~((1<<GART_PAGE_SHIFT)-1))
46
/* Per-instance state for one GART unit. */
struct gart_device {
        void __iomem            *regs;      /* mapped GART register aperture */
        u32                     *savedata;  /* PTE shadow copy for suspend/resume */
        u32                     page_count; /* total remappable size */
        tegra_iovmm_addr_t      iovmm_base; /* offset to apply to vmm_area */
        spinlock_t              pte_lock;   /* serializes ENTRY_ADDR/ENTRY_DATA pairs */
        struct tegra_iovmm_device iovmm;    /* handle registered with iovmm core */
        struct tegra_iovmm_domain domain;   /* single domain shared by all clients */
        bool                    enable;     /* set once do_gart_setup() has run */
        bool                    needs_barrier; /* emulator WAR */
};
58
/* tegra_iovmm_device_ops implementations (see tegra_iovmm_gart_ops below) */
static int gart_map(struct tegra_iovmm_device *, struct tegra_iovmm_area *);
static void gart_unmap(struct tegra_iovmm_device *,
        struct tegra_iovmm_area *, bool);
static void gart_map_pfn(struct tegra_iovmm_device *,
        struct tegra_iovmm_area *, tegra_iovmm_addr_t, unsigned long);
static struct tegra_iovmm_domain *gart_alloc_domain(
        struct tegra_iovmm_device *, struct tegra_iovmm_client *);

/* platform-driver entry points */
static int gart_probe(struct platform_device *);
static int gart_remove(struct platform_device *);
static int gart_suspend(struct tegra_iovmm_device *dev);
static void gart_resume(struct tegra_iovmm_device *dev);

72
/* Backend operations handed to the tegra_iovmm core at registration time. */
static struct tegra_iovmm_device_ops tegra_iovmm_gart_ops = {
        .map            = gart_map,
        .unmap          = gart_unmap,
        .map_pfn        = gart_map_pfn,
        .alloc_domain   = gart_alloc_domain,
        .suspend        = gart_suspend,
        .resume         = gart_resume,
};
81
/* Platform driver matching the "tegra_gart" platform device. */
static struct platform_driver tegra_iovmm_gart_drv = {
        .probe          = gart_probe,
        .remove         = gart_remove,
        .driver         = {
                .name   = DRIVER_NAME,
        },
};
89
90 static int gart_suspend(struct tegra_iovmm_device *dev)
91 {
92         struct gart_device *gart = container_of(dev, struct gart_device, iovmm);
93         unsigned int i;
94         unsigned long reg;
95
96         if (!gart)
97                 return -ENODEV;
98
99         if (!gart->enable)
100                 return 0;
101
102         spin_lock(&gart->pte_lock);
103         reg = gart->iovmm_base;
104         for (i=0; i<gart->page_count; i++) {
105                 writel(reg, gart->regs + GART_ENTRY_ADDR);
106                 gart->savedata[i] = readl(gart->regs + GART_ENTRY_DATA);
107                 dmb();
108                 reg += 1 << GART_PAGE_SHIFT;
109         }
110         spin_unlock(&gart->pte_lock);
111         return 0;
112 }
113
114 static void do_gart_setup(struct gart_device *gart, const u32 *data)
115 {
116         unsigned long reg;
117         unsigned int i;
118
119         writel(1, gart->regs + GART_CONFIG);
120
121         reg = gart->iovmm_base;
122         for (i=0; i<gart->page_count; i++) {
123                 writel(reg, gart->regs + GART_ENTRY_ADDR);
124                 writel((data) ? data[i] : 0, gart->regs + GART_ENTRY_DATA);
125                 wmb();
126                 reg += 1 << GART_PAGE_SHIFT;
127         }
128         wmb();
129 }
130
131 static void gart_resume(struct tegra_iovmm_device *dev)
132 {
133         struct gart_device *gart = container_of(dev, struct gart_device, iovmm);
134
135         if (!gart || !gart->enable || (gart->enable && !gart->savedata))
136                 return;
137
138         spin_lock(&gart->pte_lock);
139         do_gart_setup(gart, gart->savedata);
140         spin_unlock(&gart->pte_lock);
141 }
142
143 static int gart_remove(struct platform_device *pdev)
144 {
145         struct gart_device *gart = platform_get_drvdata(pdev);
146
147         if (!gart)
148                 return 0;
149
150         if (gart->enable)
151                 writel(0, gart->regs + GART_CONFIG);
152
153         gart->enable = 0;
154         platform_set_drvdata(pdev, NULL);
155         tegra_iovmm_unregister(&gart->iovmm);
156         if (gart->savedata)
157                 vfree(gart->savedata);
158         if (gart->regs)
159                 iounmap(gart->regs);
160         kfree(gart);
161         return 0;
162 }
163
164 static int gart_probe(struct platform_device *pdev)
165 {
166         struct gart_device *gart = NULL;
167         struct resource *res, *res_remap;
168         void __iomem *gart_regs = NULL;
169         int e;
170
171         if (!pdev) {
172                 pr_err(DRIVER_NAME ": platform_device required\n");
173                 return -ENODEV;
174         }
175
176         if (PAGE_SHIFT != GART_PAGE_SHIFT) {
177                 pr_err(DRIVER_NAME ": GART and CPU page size must match\n");
178                 return -ENXIO;
179         }
180
181         /* the GART memory aperture is required */
182         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
183         res_remap = platform_get_resource(pdev, IORESOURCE_MEM, 1);
184
185         if (!res || !res_remap) {
186                 pr_err(DRIVER_NAME ": GART memory aperture expected\n");
187                 return -ENXIO;
188         }
189         gart = kzalloc(sizeof(*gart), GFP_KERNEL);
190         if (!gart) {
191                 pr_err(DRIVER_NAME ": failed to allocate tegra_iovmm_device\n");
192                 e = -ENOMEM;
193                 goto fail;
194         }
195
196         gart_regs = ioremap_wc(res->start, res->end - res->start + 1);
197         if (!gart_regs) {
198                 pr_err(DRIVER_NAME ": failed to remap GART registers\n");
199                 e = -ENXIO;
200                 goto fail;
201         }
202
203         gart->iovmm.name = VMM_NAME;
204         gart->iovmm.ops = &tegra_iovmm_gart_ops;
205         gart->iovmm.pgsize_bits = GART_PAGE_SHIFT;
206         spin_lock_init(&gart->pte_lock);
207
208         platform_set_drvdata(pdev, gart);
209
210         e = tegra_iovmm_register(&gart->iovmm);
211         if (e) goto fail;
212
213         e = tegra_iovmm_domain_init(&gart->domain, &gart->iovmm,
214                 (tegra_iovmm_addr_t)res_remap->start,
215                 (tegra_iovmm_addr_t)res_remap->end+1);
216         if (e) goto fail;
217
218         gart->regs = gart_regs;
219         gart->iovmm_base = (tegra_iovmm_addr_t)res_remap->start;
220         gart->page_count = res_remap->end - res_remap->start + 1;
221         gart->page_count >>= GART_PAGE_SHIFT;
222
223         gart->savedata = vmalloc(sizeof(u32)*gart->page_count);
224         if (!gart->savedata) {
225                 pr_err(DRIVER_NAME ": failed to allocate context save area\n");
226                 e = -ENOMEM;
227                 goto fail;
228         }
229
230         spin_lock(&gart->pte_lock);
231
232         do_gart_setup(gart, NULL);
233         gart->enable = 1;
234
235         spin_unlock(&gart->pte_lock);
236         return 0;
237
238 fail:
239         if (gart_regs)
240                 iounmap(gart_regs);
241         if (gart && gart->savedata)
242                 vfree(gart->savedata);
243         if (gart)
244                 kfree(gart);
245         return e;
246 }
247
248 static int __devinit gart_init(void)
249 {
250         return platform_driver_register(&tegra_iovmm_gart_drv);
251 }
252
253 static void __exit gart_exit(void)
254 {
255         return platform_driver_unregister(&tegra_iovmm_gart_drv);
256 }
257
258 #define GART_PTE(_pfn) (0x80000000ul | ((_pfn)<<PAGE_SHIFT))
259
260
/*
 * gart_map - populate GART PTEs for every page of @iovma, pinning each
 * backing page through the client's lock_makeresident() callback.
 *
 * Returns 0 on success; on failure, releases and clears everything mapped
 * so far and returns -ENOMEM.
 */
static int gart_map(struct tegra_iovmm_device *dev,
        struct tegra_iovmm_area *iovma)
{
        struct gart_device *gart = container_of(dev, struct gart_device, iovmm);
        unsigned long gart_page, count;
        unsigned int i;

        gart_page = iovma->iovm_start;
        count = iovma->iovm_length >> GART_PAGE_SHIFT;

        for (i=0; i<count; i++) {
                unsigned long pfn;

                /* ask the client to pin the page backing this offset */
                pfn = iovma->ops->lock_makeresident(iovma, i<<PAGE_SHIFT);
                if (!pfn_valid(pfn))
                        goto fail;

                /* the ADDR/DATA register pair must be written without
                 * interleaving from other mappers, hence the per-entry
                 * lock scope */
                spin_lock(&gart->pte_lock);

                writel(gart_page, gart->regs + GART_ENTRY_ADDR);
                writel(GART_PTE(pfn), gart->regs + GART_ENTRY_DATA);
                wmb();
                gart_page += 1 << GART_PAGE_SHIFT;

                spin_unlock(&gart->pte_lock);
        }
        wmb();
        return 0;

fail:
        /* Unwind: i is the index that failed and was never written;
         * walk back over the successfully-mapped entries, releasing each
         * client page and clearing its PTE.  gart_page still points one
         * entry past the last successful write, so it is decremented
         * before each writel(). */
        spin_lock(&gart->pte_lock);
        while (i--) {
                iovma->ops->release(iovma, i<<PAGE_SHIFT);
                gart_page -= 1 << GART_PAGE_SHIFT;
                writel(gart_page, gart->regs + GART_ENTRY_ADDR);
                writel(0, gart->regs + GART_ENTRY_DATA);
                wmb();
        }
        spin_unlock(&gart->pte_lock);
        wmb();
        return -ENOMEM;
}
303
304 static void gart_unmap(struct tegra_iovmm_device *dev,
305         struct tegra_iovmm_area *iovma, bool decommit)
306 {
307         struct gart_device *gart = container_of(dev, struct gart_device, iovmm);
308         unsigned long gart_page, count;
309         unsigned int i;
310
311         count = iovma->iovm_length >> GART_PAGE_SHIFT;
312         gart_page = iovma->iovm_start;
313
314         spin_lock(&gart->pte_lock);
315         for (i=0; i<count; i++) {
316                 if (iovma->ops && iovma->ops->release)
317                         iovma->ops->release(iovma, i<<PAGE_SHIFT);
318
319                 writel(gart_page, gart->regs + GART_ENTRY_ADDR);
320                 writel(0, gart->regs + GART_ENTRY_DATA);
321                 wmb();
322                 gart_page += 1 << GART_PAGE_SHIFT;
323         }
324         spin_unlock(&gart->pte_lock);
325         wmb();
326 }
327
328 static void gart_map_pfn(struct tegra_iovmm_device *dev,
329         struct tegra_iovmm_area *iovma, tegra_iovmm_addr_t offs,
330         unsigned long pfn)
331 {
332         struct gart_device *gart = container_of(dev, struct gart_device, iovmm);
333
334         BUG_ON(!pfn_valid(pfn));
335         spin_lock(&gart->pte_lock);
336         writel(offs, gart->regs + GART_ENTRY_ADDR);
337         writel(GART_PTE(pfn), gart->regs + GART_ENTRY_DATA);
338         wmb();
339         spin_unlock(&gart->pte_lock);
340         wmb();
341 }
342
343 static struct tegra_iovmm_domain *gart_alloc_domain(
344         struct tegra_iovmm_device *dev, struct tegra_iovmm_client *client)
345 {
346         struct gart_device *gart = container_of(dev, struct gart_device, iovmm);
347         return &gart->domain;
348 }
349
/* Register at subsys-initcall time so the GART is available before the
 * client drivers that depend on it probe. */
subsys_initcall(gart_init);
module_exit(gart_exit);