/*
 * arch/arm/mach-tegra/iovmm-gart.c
 *
 * Tegra I/O VMM implementation for GART devices in Tegra and Tegra 2 series
 * systems-on-a-chip.
 *
 * Copyright (c) 2010-2012 NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/io.h>

#include <asm/cacheflush.h>

#include <mach/iovmm.h>

#define GART_CONFIG             0x24
#define GART_ENTRY_ADDR         0x28
#define GART_ENTRY_DATA         0x2c

#define VMM_NAME "iovmm-gart"
#define DRIVER_NAME "tegra_gart"

#define GART_PAGE_SHIFT         12
#define GART_PAGE_SIZE          (1 << GART_PAGE_SHIFT)
#define GART_PAGE_MASK          (~(GART_PAGE_SIZE - 1))

struct gart_device {
        void __iomem            *regs;
        u32                     *savedata;
        u32                     page_count; /* total remappable size */
        tegra_iovmm_addr_t      iovmm_base; /* offset to apply to vmm_area */
        spinlock_t              pte_lock;
        struct tegra_iovmm_device iovmm;
        struct tegra_iovmm_domain domain;
        bool                    enable;
};

/*
 * Any interaction between a block on PPSB and a block on APB or AHB
 * must be followed by a read-back to ensure the APB/AHB bus
 * transaction has completed before initiating further activity on the
 * PPSB block.
 */
#define FLUSH_GART_REGS(gart)   (void)readl((gart)->regs + GART_CONFIG)

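/*
 * Program a single GART page-table entry: the target I/O virtual
 * address goes into GART_ENTRY_ADDR, then the PTE value into
 * GART_ENTRY_DATA. These register writes are posted, so callers
 * follow a batch of updates with FLUSH_GART_REGS().
 */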
static inline void gart_set_pte(struct gart_device *gart,
                                tegra_iovmm_addr_t offs, u32 pte)
{
        writel(offs, gart->regs + GART_ENTRY_ADDR);
        writel(pte, gart->regs + GART_ENTRY_DATA);
}

static int gart_map(struct tegra_iovmm_domain *, struct tegra_iovmm_area *);
static void gart_unmap(struct tegra_iovmm_domain *,
        struct tegra_iovmm_area *, bool);
static void gart_map_pfn(struct tegra_iovmm_domain *,
        struct tegra_iovmm_area *, unsigned long, unsigned long);
static struct tegra_iovmm_domain *gart_alloc_domain(
        struct tegra_iovmm_device *, struct tegra_iovmm_client *);

static int gart_probe(struct platform_device *);
static int gart_remove(struct platform_device *);
static int gart_suspend(struct tegra_iovmm_device *dev);
static void gart_resume(struct tegra_iovmm_device *dev);

static struct tegra_iovmm_device_ops tegra_iovmm_gart_ops = {
        .map            = gart_map,
        .unmap          = gart_unmap,
        .map_pfn        = gart_map_pfn,
        .alloc_domain   = gart_alloc_domain,
        .suspend        = gart_suspend,
        .resume         = gart_resume,
};

static struct platform_driver tegra_iovmm_gart_drv = {
        .probe          = gart_probe,
        .remove         = gart_remove,
        .driver         = {
                .name   = DRIVER_NAME,
        },
};

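/*
 * Save the current contents of every GART PTE into gart->savedata so
 * that gart_resume() can reprogram the aperture after register state
 * is lost across suspend.
 */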
static int gart_suspend(struct tegra_iovmm_device *dev)
{
        struct gart_device *gart = container_of(dev, struct gart_device, iovmm);
        unsigned int i;
        unsigned long reg;

        if (!gart)
                return -ENODEV;

        if (!gart->enable)
                return 0;

        spin_lock(&gart->pte_lock);
        reg = gart->iovmm_base;
        for (i = 0; i < gart->page_count; i++) {
                writel(reg, gart->regs + GART_ENTRY_ADDR);
                gart->savedata[i] = readl(gart->regs + GART_ENTRY_DATA);
                reg += GART_PAGE_SIZE;
        }
        spin_unlock(&gart->pte_lock);
        return 0;
}

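/*
 * (Re)initialize the aperture: program every PTE, either from a saved
 * context or to zero (invalid), then enable translation by writing 1
 * to GART_CONFIG (gart_remove() writes 0 to disable it) and flush the
 * posted writes.
 */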
static void do_gart_setup(struct gart_device *gart, const u32 *data)
{
        unsigned long reg;
        unsigned int i;

        reg = gart->iovmm_base;
        for (i = 0; i < gart->page_count; i++) {
                gart_set_pte(gart, reg, data ? data[i] : 0);
                reg += GART_PAGE_SIZE;
        }
        writel(1, gart->regs + GART_CONFIG);
        FLUSH_GART_REGS(gart);
}

static void gart_resume(struct tegra_iovmm_device *dev)
{
        struct gart_device *gart = container_of(dev, struct gart_device, iovmm);

        if (!gart || !gart->enable || !gart->savedata)
                return;

        spin_lock(&gart->pte_lock);
        do_gart_setup(gart, gart->savedata);
        spin_unlock(&gart->pte_lock);
}

static int gart_remove(struct platform_device *pdev)
{
        struct gart_device *gart = platform_get_drvdata(pdev);

        if (!gart)
                return 0;

        if (gart->enable)
                writel(0, gart->regs + GART_CONFIG);

        gart->enable = false;
        platform_set_drvdata(pdev, NULL);
        tegra_iovmm_unregister(&gart->iovmm);
        if (gart->savedata)
                vfree(gart->savedata);
        if (gart->regs)
                iounmap(gart->regs);
        kfree(gart);
        return 0;
}

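/*
 * The driver expects two memory resources: index 0 is the GART
 * register aperture, index 1 is the I/O virtual address range the
 * GART can remap. A board file would register the device along these
 * lines (a sketch only; the TEGRA_GART_* macros below are
 * placeholders, not real Tegra addresses):
 *
 *	static struct resource tegra_gart_resources[] = {
 *		{
 *			.start = TEGRA_GART_REGS_BASE,
 *			.end   = TEGRA_GART_REGS_BASE + TEGRA_GART_REGS_SIZE - 1,
 *			.flags = IORESOURCE_MEM,
 *		},
 *		{
 *			.start = TEGRA_GART_APERTURE_BASE,
 *			.end   = TEGRA_GART_APERTURE_BASE +
 *				 TEGRA_GART_APERTURE_SIZE - 1,
 *			.flags = IORESOURCE_MEM,
 *		},
 *	};
 *
 *	static struct platform_device tegra_gart_device = {
 *		.name          = "tegra_gart",
 *		.id            = -1,
 *		.num_resources = ARRAY_SIZE(tegra_gart_resources),
 *		.resource      = tegra_gart_resources,
 *	};
 *
 *	platform_device_register(&tegra_gart_device);
 */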
static int gart_probe(struct platform_device *pdev)
{
        struct gart_device *gart;
        struct resource *res, *res_remap;
        void __iomem *gart_regs;
        int e;

        if (!pdev) {
                pr_err(DRIVER_NAME ": platform_device required\n");
                return -ENODEV;
        }

        if (PAGE_SHIFT != GART_PAGE_SHIFT) {
                pr_err(DRIVER_NAME ": GART and CPU page size must match\n");
                return -ENXIO;
        }

        /* both the register aperture and the remappable range are required */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        res_remap = platform_get_resource(pdev, IORESOURCE_MEM, 1);

        if (!res || !res_remap) {
                pr_err(DRIVER_NAME ": GART memory aperture expected\n");
                return -ENXIO;
        }

        gart = kzalloc(sizeof(*gart), GFP_KERNEL);
        if (!gart) {
                pr_err(DRIVER_NAME ": failed to allocate gart_device\n");
                return -ENOMEM;
        }

        gart_regs = ioremap(res->start, resource_size(res));
        if (!gart_regs) {
                pr_err(DRIVER_NAME ": failed to remap GART registers\n");
                e = -ENXIO;
                goto fail;
        }

        gart->iovmm.name = VMM_NAME;
        gart->iovmm.ops = &tegra_iovmm_gart_ops;
        gart->iovmm.pgsize_bits = GART_PAGE_SHIFT;
        spin_lock_init(&gart->pte_lock);

        platform_set_drvdata(pdev, gart);

        e = tegra_iovmm_register(&gart->iovmm);
        if (e)
                goto fail;

        e = tegra_iovmm_domain_init(&gart->domain, &gart->iovmm,
                (tegra_iovmm_addr_t)res_remap->start,
                (tegra_iovmm_addr_t)res_remap->end + 1);
        if (e)
                goto fail;

        gart->regs = gart_regs;
        gart->iovmm_base = (tegra_iovmm_addr_t)res_remap->start;
        gart->page_count = resource_size(res_remap) >> GART_PAGE_SHIFT;

        gart->savedata = vmalloc(sizeof(u32) * gart->page_count);
        if (!gart->savedata) {
                pr_err(DRIVER_NAME ": failed to allocate context save area\n");
                e = -ENOMEM;
                goto fail;
        }

        do_gart_setup(gart, NULL);
        gart->enable = true;

        return 0;

fail:
        platform_set_drvdata(pdev, NULL);
        if (gart_regs)
                iounmap(gart_regs);
        if (gart && gart->savedata)
                vfree(gart->savedata);
        kfree(gart);
        return e;
}

static int __init gart_init(void)
{
        return platform_driver_register(&tegra_iovmm_gart_drv);
}

static void __exit gart_exit(void)
{
        platform_driver_unregister(&tegra_iovmm_gart_drv);
}

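/*
 * A GART PTE is the page's physical address with bit 31 set; bit 31
 * appears to serve as the entry-valid flag, since unmapped entries
 * are written as 0.
 */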
#define GART_PTE(_pfn) (0x80000000ul | ((_pfn) << PAGE_SHIFT))

static int gart_map(struct tegra_iovmm_domain *domain,
        struct tegra_iovmm_area *iovma)
{
        struct gart_device *gart =
                container_of(domain, struct gart_device, domain);
        unsigned long gart_page, count;
        unsigned int i;

        gart_page = iovma->iovm_start;
        count = iovma->iovm_length >> GART_PAGE_SHIFT;

        for (i = 0; i < count; i++) {
                unsigned long pfn;

                pfn = iovma->ops->lock_makeresident(iovma, i << PAGE_SHIFT);
                if (!pfn_valid(pfn))
                        goto fail;

                spin_lock(&gart->pte_lock);

                gart_set_pte(gart, gart_page, GART_PTE(pfn));
                FLUSH_GART_REGS(gart);
                gart_page += GART_PAGE_SIZE;

                spin_unlock(&gart->pte_lock);
        }

        return 0;

fail:
        /* unwind: release and invalidate every entry mapped so far */
        spin_lock(&gart->pte_lock);
        while (i--) {
                iovma->ops->release(iovma, i << PAGE_SHIFT);
                gart_page -= GART_PAGE_SIZE;
                gart_set_pte(gart, gart_page, 0);
        }
        FLUSH_GART_REGS(gart);
        spin_unlock(&gart->pte_lock);

        return -ENOMEM;
}

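/*
 * Tear down an iovma's mappings: hand each page back to its owner via
 * the area's release() callback and invalidate the corresponding PTE.
 * The decommit flag is unused here; the PTEs are always cleared.
 */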
static void gart_unmap(struct tegra_iovmm_domain *domain,
        struct tegra_iovmm_area *iovma, bool decommit)
{
        struct gart_device *gart =
                container_of(domain, struct gart_device, domain);
        unsigned long gart_page, count;
        unsigned int i;

        count = iovma->iovm_length >> GART_PAGE_SHIFT;
        gart_page = iovma->iovm_start;

        spin_lock(&gart->pte_lock);
        for (i = 0; i < count; i++) {
                if (iovma->ops && iovma->ops->release)
                        iovma->ops->release(iovma, i << PAGE_SHIFT);

                gart_set_pte(gart, gart_page, 0);
                gart_page += GART_PAGE_SIZE;
        }
        FLUSH_GART_REGS(gart);
        spin_unlock(&gart->pte_lock);
}

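/*
 * Map a single, already-resident pfn at the given aperture offset.
 * Unlike gart_map(), no residency callback is involved; the caller
 * guarantees the page is valid.
 */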
static void gart_map_pfn(struct tegra_iovmm_domain *domain,
        struct tegra_iovmm_area *iovma, unsigned long offs,
        unsigned long pfn)
{
        struct gart_device *gart =
                container_of(domain, struct gart_device, domain);

        BUG_ON(!pfn_valid(pfn));
        spin_lock(&gart->pte_lock);
        gart_set_pte(gart, offs, GART_PTE(pfn));
        FLUSH_GART_REGS(gart);
        spin_unlock(&gart->pte_lock);
}

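/*
 * The GART provides a single translation aperture, so every client
 * shares the one domain embedded in the gart_device.
 */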
static struct tegra_iovmm_domain *gart_alloc_domain(
        struct tegra_iovmm_device *dev, struct tegra_iovmm_client *client)
{
        struct gart_device *gart = container_of(dev, struct gart_device, iovmm);
        return &gart->domain;
}

subsys_initcall(gart_init);
module_exit(gart_exit);

MODULE_LICENSE("GPL");