rtc: tps80031: register as mfd sub device
[linux-2.6.git] / arch / arm / mach-tegra / iovmm-smmu.c
1 /*
2  * arch/arm/mach-tegra/iovmm-smmu.c
3  *
4  * Tegra I/O VMM implementation for SMMU devices for Tegra 3 series
5  * systems-on-a-chip.
6  *
7  * Copyright (c) 2010-2012, NVIDIA CORPORATION.  All rights reserved.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; either version 2 of the License, or
12  * (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful, but WITHOUT
15  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
17  * more details.
18  *
19  * You should have received a copy of the GNU General Public License along
20  * with this program; if not, write to the Free Software Foundation, Inc.,
21  * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
22  */
23
24 #include <linux/module.h>
25 #include <linux/platform_device.h>
26 #include <linux/spinlock.h>
27 #include <linux/slab.h>
28 #include <linux/vmalloc.h>
29 #include <linux/mm.h>
30 #include <linux/pagemap.h>
31 #include <linux/sysfs.h>
32 #include <linux/device.h>
33 #include <linux/sched.h>
34 #include <linux/io.h>
35
36 #include <asm/page.h>
37 #include <asm/cacheflush.h>
38
39 #include <mach/iovmm.h>
40 #include <mach/iomap.h>
41 #include <mach/tegra_smmu.h>
42
43 #ifndef CONFIG_ARCH_TEGRA_2x_SOC
44 /*
45  * ALL-CAP macros copied from armc.h
46  */
47 #define MC_SMMU_CONFIG_0                                0x10
48 #define MC_SMMU_CONFIG_0_SMMU_ENABLE_DISABLE            0
49 #define MC_SMMU_CONFIG_0_SMMU_ENABLE_ENABLE             1
50
51 #define MC_SMMU_TLB_CONFIG_0                            0x14
52 #define MC_SMMU_TLB_CONFIG_0_TLB_STATS__MASK            (1 << 31)
53 #define MC_SMMU_TLB_CONFIG_0_TLB_STATS__ENABLE          (1 << 31)
54 #define MC_SMMU_TLB_CONFIG_0_TLB_HIT_UNDER_MISS__ENABLE (1 << 29)
55 #define MC_SMMU_TLB_CONFIG_0_TLB_ACTIVE_LINES__VALUE    0x10
56 #define MC_SMMU_TLB_CONFIG_0_RESET_VAL                  0x20000010
57
58 #define MC_SMMU_PTC_CONFIG_0                            0x18
59 #define MC_SMMU_PTC_CONFIG_0_PTC_STATS__MASK            (1 << 31)
60 #define MC_SMMU_PTC_CONFIG_0_PTC_STATS__ENABLE          (1 << 31)
61 #define MC_SMMU_PTC_CONFIG_0_PTC_CACHE__ENABLE          (1 << 29)
62 #define MC_SMMU_PTC_CONFIG_0_PTC_INDEX_MAP__PATTERN     0x3f
63 #define MC_SMMU_PTC_CONFIG_0_RESET_VAL                  0x2000003f
64
65 #define MC_SMMU_PTB_ASID_0                              0x1c
66 #define MC_SMMU_PTB_ASID_0_CURRENT_ASID_SHIFT           0
67
68 #define MC_SMMU_PTB_DATA_0                              0x20
69 #define MC_SMMU_PTB_DATA_0_RESET_VAL                    0
70 #define MC_SMMU_PTB_DATA_0_ASID_NONSECURE_SHIFT         29
71 #define MC_SMMU_PTB_DATA_0_ASID_WRITABLE_SHIFT          30
72 #define MC_SMMU_PTB_DATA_0_ASID_READABLE_SHIFT          31
73
74 #define MC_SMMU_TLB_FLUSH_0                             0x30
75 #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_MATCH_ALL              0
76 #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_MATCH_SECTION          2
77 #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_MATCH_GROUP            3
78 #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_SHIFT                29
79 #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_DISABLE        0
80 #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_ENABLE         1
81 #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_SHIFT          31
82
83 #define MC_SMMU_PTC_FLUSH_0                             0x34
84 #define MC_SMMU_PTC_FLUSH_0_PTC_FLUSH_TYPE_ALL          0
85 #define MC_SMMU_PTC_FLUSH_0_PTC_FLUSH_TYPE_ADR          1
86 #define MC_SMMU_PTC_FLUSH_0_PTC_FLUSH_ADR_SHIFT         4
87
88 #define MC_SMMU_ASID_SECURITY_0                         0x38
89
90 #define MC_SMMU_STATS_TLB_HIT_COUNT_0                   0x1f0
91 #define MC_SMMU_STATS_TLB_MISS_COUNT_0                  0x1f4
92 #define MC_SMMU_STATS_PTC_HIT_COUNT_0                   0x1f8
93 #define MC_SMMU_STATS_PTC_MISS_COUNT_0                  0x1fc
94
95 #define MC_SMMU_TRANSLATION_ENABLE_0_0                  0x228
96 #define MC_SMMU_TRANSLATION_ENABLE_1_0                  0x22c
97 #define MC_SMMU_TRANSLATION_ENABLE_2_0                  0x230
98
99 #define MC_SMMU_AFI_ASID_0              0x238   /* PCIE */
100 #define MC_SMMU_AVPC_ASID_0             0x23c   /* AVP */
101 #define MC_SMMU_DC_ASID_0               0x240   /* Display controller */
102 #define MC_SMMU_DCB_ASID_0              0x244   /* Display controller B */
103 #define MC_SMMU_EPP_ASID_0              0x248   /* Encoder pre-processor */
104 #define MC_SMMU_G2_ASID_0               0x24c   /* 2D engine */
105 #define MC_SMMU_HC_ASID_0               0x250   /* Host1x */
106 #define MC_SMMU_HDA_ASID_0              0x254   /* High-def audio */
107 #define MC_SMMU_ISP_ASID_0              0x258   /* Image signal processor */
108 #define MC_SMMU_MPE_ASID_0              0x264   /* MPEG encoder */
109 #define MC_SMMU_NV_ASID_0               0x268   /* (3D) */
110 #define MC_SMMU_NV2_ASID_0              0x26c   /* (3D) */
111 #define MC_SMMU_PPCS_ASID_0             0x270   /* AHB */
112 #define MC_SMMU_SATA_ASID_0             0x278   /* SATA */
113 #define MC_SMMU_VDE_ASID_0              0x27c   /* Video decoder */
114 #define MC_SMMU_VI_ASID_0               0x280   /* Video input */
115
116 #define SMMU_PDE_NEXT_SHIFT             28
117
118 /* Copied from arahb_arbc.h */
119 #define AHB_ARBITRATION_XBAR_CTRL_0     0xe0
120 #define AHB_ARBITRATION_XBAR_CTRL_0_SMMU_INIT_DONE_DONE         1
121 #define AHB_ARBITRATION_XBAR_CTRL_0_SMMU_INIT_DONE_SHIFT        17
122
123 #endif
124
125 #define MC_SMMU_NUM_ASIDS       4
126 #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_SECTION__MASK          0xffc00000
127 #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_SECTION__SHIFT 12 /* right shift */
128 #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_GROUP__MASK            0xffffc000
129 #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_GROUP__SHIFT   12 /* right shift */
130 #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA(iova, which)   \
131         ((((iova) & MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_##which##__MASK) >> \
132                 MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_##which##__SHIFT) |    \
133         MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_MATCH_##which)
134 #define MC_SMMU_PTB_ASID_0_CURRENT_ASID(n)      \
135                 ((n) << MC_SMMU_PTB_ASID_0_CURRENT_ASID_SHIFT)
136 #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_disable                \
137                 (MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_DISABLE <<    \
138                         MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_SHIFT)
139 #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH__ENABLE                \
140                 (MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_ENABLE <<     \
141                         MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_SHIFT)
142
143 #define VMM_NAME "iovmm-smmu"
144 #define DRIVER_NAME "tegra_smmu"
145
146 #define SMMU_PAGE_SHIFT 12
147 #define SMMU_PAGE_SIZE  (1 << SMMU_PAGE_SHIFT)
148
149 #define SMMU_PDIR_COUNT 1024
150 #define SMMU_PDIR_SIZE  (sizeof(unsigned long) * SMMU_PDIR_COUNT)
151 #define SMMU_PTBL_COUNT 1024
152 #define SMMU_PTBL_SIZE  (sizeof(unsigned long) * SMMU_PTBL_COUNT)
153 #define SMMU_PDIR_SHIFT 12
154 #define SMMU_PDE_SHIFT  12
155 #define SMMU_PTE_SHIFT  12
156 #define SMMU_PFN_MASK   0x000fffff
157
/* I/O virtual address -> page frame number (4KB pages) */
#define SMMU_ADDR_TO_PFN(addr)  ((addr) >> 12)
/* I/O virtual address -> page directory index (each PDE spans 4MB) */
#define SMMU_ADDR_TO_PDN(addr)  ((addr) >> 22)
/*
 * Page directory index -> base I/O virtual address of its 4MB span.
 * Fix: the parameter was previously named "addr" while the expansion
 * referenced "pdn", so the macro only compiled by accident when a
 * variable named "pdn" happened to be in scope at the call site.
 */
#define SMMU_PDN_TO_ADDR(pdn)   ((pdn) << 22)
161
162 #define _READABLE       (1 << MC_SMMU_PTB_DATA_0_ASID_READABLE_SHIFT)
163 #define _WRITABLE       (1 << MC_SMMU_PTB_DATA_0_ASID_WRITABLE_SHIFT)
164 #define _NONSECURE      (1 << MC_SMMU_PTB_DATA_0_ASID_NONSECURE_SHIFT)
165 #define _PDE_NEXT       (1 << SMMU_PDE_NEXT_SHIFT)
166 #define _MASK_ATTR      (_READABLE | _WRITABLE | _NONSECURE)
167
168 #define _PDIR_ATTR      (_READABLE | _WRITABLE | _NONSECURE)
169
170 #define _PDE_ATTR       (_READABLE | _WRITABLE | _NONSECURE)
171 #define _PDE_ATTR_N     (_PDE_ATTR | _PDE_NEXT)
172 #define _PDE_VACANT(pdn)        (((pdn) << 10) | _PDE_ATTR)
173
174 #define _PTE_ATTR       (_READABLE | _WRITABLE | _NONSECURE)
175 #define _PTE_VACANT(addr)       (((addr) >> SMMU_PAGE_SHIFT) | _PTE_ATTR)
176
177 #define SMMU_MK_PDIR(page, attr)        \
178                 ((page_to_phys(page) >> SMMU_PDIR_SHIFT) | (attr))
179 #define SMMU_MK_PDE(page, attr)         \
180                 (unsigned long)((page_to_phys(page) >> SMMU_PDE_SHIFT) | (attr))
181 #define SMMU_EX_PTBL_PAGE(pde)          \
182                 pfn_to_page((unsigned long)(pde) & SMMU_PFN_MASK)
183 #define SMMU_PFN_TO_PTE(pfn, attr)      (unsigned long)((pfn) | (attr))
184
185 #define SMMU_ASID_ENABLE(asid)  ((asid) | (1 << 31))
186 #define SMMU_ASID_DISABLE       0
187 #define SMMU_ASID_ASID(n)       ((n) & ~SMMU_ASID_ENABLE(0))
188
/* Keep this as a "natural" enumeration (no assignments) */
/*
 * Hardware clients whose memory accesses can be translated by the SMMU.
 * Order must track the MC_SMMU_*_ASID_0 register definitions above;
 * HWC_COUNT is a sentinel and must stay last.
 */
enum smmu_hwclient {
        HWC_AFI,        /* PCIE */
        HWC_AVPC,       /* AVP */
        HWC_DC,         /* Display controller */
        HWC_DCB,        /* Display controller B */
        HWC_EPP,        /* Encoder pre-processor */
        HWC_G2,         /* 2D engine */
        HWC_HC,         /* Host1x */
        HWC_HDA,        /* High-def audio */
        HWC_ISP,        /* Image signal processor */
        HWC_MPE,        /* MPEG encoder */
        HWC_NV,         /* 3D */
        HWC_NV2,        /* 3D */
        HWC_PPCS,       /* AHB */
        HWC_SATA,       /* SATA */
        HWC_VDE,        /* Video decoder */
        HWC_VI,         /* Video input */

        HWC_COUNT       /* number of clients; not a real client */
};
210
/*
 * Software copy of one hardware client's ASID assignment, used to
 * program (and restore across suspend/resume) its ASID register.
 */
struct smmu_hwc_state {
        unsigned long reg;              /* MC_SMMU_<client>_ASID_0 offset */
        unsigned long enable_disable;   /* value to write to that register */
};
215
/* Hardware client mapping initializer */
#define HWC_INIT(client)        \
        [HWC_##client] = {MC_SMMU_##client##_ASID_0, SMMU_ASID_DISABLE},

/* Reset state: every client's ASID register offset, translation disabled */
static const struct smmu_hwc_state smmu_hwc_state_init[] = {
        HWC_INIT(AFI)
        HWC_INIT(AVPC)
        HWC_INIT(DC)
        HWC_INIT(DCB)
        HWC_INIT(EPP)
        HWC_INIT(G2)
        HWC_INIT(HC)
        HWC_INIT(HDA)
        HWC_INIT(ISP)
        HWC_INIT(MPE)
        HWC_INIT(NV)
        HWC_INIT(NV2)
        HWC_INIT(PPCS)
        HWC_INIT(SATA)
        HWC_INIT(VDE)
        HWC_INIT(VI)
};
238
239
/*
 * Maps an IOVMM client device name to the set of hardware clients whose
 * translation should be enabled for that client's address space.
 */
struct domain_hwc_map {
        const char *dev_name;           /* client misc-device name to match */
        const enum smmu_hwclient *hwcs; /* hardware clients in this group */
        const unsigned int nr_hwcs;     /* number of entries in hwcs */
};
245
/* Enable all hardware clients for SMMU translation */
static const enum smmu_hwclient nvmap_hwcs[] = {
        HWC_AFI,
        HWC_AVPC,
        HWC_DC,
        HWC_DCB,
        HWC_EPP,
        HWC_G2,
        HWC_HC,
        HWC_HDA,
        HWC_ISP,
        HWC_MPE,
        HWC_NV,
        HWC_NV2,
        HWC_PPCS,
        HWC_SATA,
        HWC_VDE,
        HWC_VI
};
265
/* Device-name -> hardware-client-group table consulted at domain alloc */
static const struct domain_hwc_map smmu_hwc_map[] = {
        {
                .dev_name = "nvmap",
                .hwcs = nvmap_hwcs,
                .nr_hwcs = ARRAY_SIZE(nvmap_hwcs),
        },
};
273
/*
 * Per address space
 * One AS corresponds to one hardware ASID and owns a one-page page
 * directory plus dynamically allocated page tables.
 */
struct smmu_as {
        struct smmu_device      *smmu;  /* back pointer to container */
        unsigned int            asid;   /* hardware ASID of this AS */
        const struct domain_hwc_map     *hwclients; /* non-NULL while in use */
        struct mutex    lock;   /* for pagetable */
        struct tegra_iovmm_domain domain;
        struct page     *pdir_page;     /* page directory (NULL until alloc) */
        unsigned long   pdir_attr;      /* attribute bits for PDIR pointer */
        unsigned long   pde_attr;       /* attribute bits for PDEs */
        unsigned long   pte_attr;       /* attribute bits for PTEs */
        unsigned int    *pte_count;     /* per-PDE count of in-use PTEs */
        struct device   sysfs_dev;
        int             sysfs_use_count;
};
291
/*
 * Per SMMU device
 */
struct smmu_device {
        void __iomem    *regs, *regs_ahbarb;    /* MC/SMMU and AHB arbiter MMIO */
        tegra_iovmm_addr_t      iovmm_base;     /* remappable base address */
        unsigned long   page_count;             /* total remappable size */
        spinlock_t      lock;   /* serializes SMMU register programming */
        char            *name;
        struct tegra_iovmm_device iovmm_dev;
        int             num_ases;               /* entries in "as" array */
        struct smmu_as  *as;                    /* Run-time allocated array */
        struct smmu_hwc_state   hwc_state[HWC_COUNT];
        struct device   sysfs_dev;
        int             sysfs_use_count;
        bool            enable;                 /* SMMU translation active */
        struct page *avp_vector_page;   /* dummy page shared by all AS's */

        /*
         * Register image savers for suspend/resume
         * (captured in smmu_suspend, replayed by smmu_setup_regs)
         */
        unsigned long config_0;         /* Secure reg */
        unsigned long tlb_config_0;
        unsigned long ptc_config_0;
        unsigned long ptb_asid_0;
        unsigned long translation_enable_0_0;   /* Secure reg */
        unsigned long translation_enable_1_0;   /* Secure reg */
        unsigned long translation_enable_2_0;   /* Secure reg */
        unsigned long asid_security_0;  /* Secure reg */

        unsigned long lowest_asid;      /* Variables for hardware testing */
        unsigned long debug_asid;
        unsigned long signature_pid;    /* For debugging aid */
};
326
/* Physical address of "va", which must lie within the kmap'ed "page" */
#define VA_PAGE_TO_PA(va, page) \
        (page_to_phys(page) + ((unsigned long)(va) & ~PAGE_MASK))

/*
 * Clean the CPU D-cache (inner and outer) for [va, va+size) so that the
 * SMMU's reads of page-table memory observe the CPU's latest writes.
 */
#define FLUSH_CPU_DCACHE(va, page, size)        \
        do {    \
                unsigned long _pa_ = VA_PAGE_TO_PA(va, page);           \
                __cpuc_flush_dcache_area((void *)(va), (size_t)(size)); \
                outer_flush_range(_pa_, _pa_+(size_t)(size));           \
        } while (0)

/*
 * Any interaction between any block on PPSB and a block on APB or AHB
 * must have these read-back to ensure the APB/AHB bus transaction is
 * complete before initiating activity on the PPSB block.
 */
#define FLUSH_SMMU_REGS(smmu) (void)readl((smmu)->regs + MC_SMMU_PTB_DATA_0)
343
/*
 * Flush all TLB entries and all PTC entries
 * Caller must lock smmu
 */
static void smmu_flush_regs(struct smmu_device *smmu)
{
        /* Invalidate the entire page-table cache */
        writel(MC_SMMU_PTC_FLUSH_0_PTC_FLUSH_TYPE_ALL,
                smmu->regs + MC_SMMU_PTC_FLUSH_0);
        /* Read back so the PTC flush completes before the TLB flush
         * is issued (PPSB vs. APB/AHB ordering, see FLUSH_SMMU_REGS) */
        FLUSH_SMMU_REGS(smmu);
        /* Invalidate every TLB entry regardless of ASID */
        writel(MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_MATCH_ALL |
                        MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_disable,
                smmu->regs + MC_SMMU_TLB_FLUSH_0);
        FLUSH_SMMU_REGS(smmu);
}
358
/*
 * Program (or re-program, e.g. after resume) the SMMU from the software
 * state held in "smmu": per-AS page directory pointers, per-client ASID
 * assignments, and the saved configuration/translation-enable registers.
 * Ends by flushing the PTC/TLB and setting SMMU_INIT_DONE in the AHB
 * arbiter.  Called with smmu->lock held on the resume path.
 */
static void smmu_setup_regs(struct smmu_device *smmu)
{
        int i;

        if (smmu->as) {
                int asid;

                /* Set/restore page directory for each AS */
                for (asid = 0; asid < smmu->num_ases; asid++) {
                        struct smmu_as *as = &smmu->as[asid];

                        /* Select the ASID, then write its PDIR pointer
                         * (or the reset value if no PDIR is allocated) */
                        writel(MC_SMMU_PTB_ASID_0_CURRENT_ASID(as->asid),
                                as->smmu->regs + MC_SMMU_PTB_ASID_0);
                        writel(as->pdir_page
                                ? SMMU_MK_PDIR(as->pdir_page, as->pdir_attr)
                                : MC_SMMU_PTB_DATA_0_RESET_VAL,
                                as->smmu->regs + MC_SMMU_PTB_DATA_0);
                }
        }

        /* Set/restore ASID for each hardware client */
        for (i = 0; i < HWC_COUNT; i++) {
                struct smmu_hwc_state *hwcst = &smmu->hwc_state[i];
                writel(hwcst->enable_disable, smmu->regs + hwcst->reg);
        }

        /* Restore the saved register image; CONFIG_0 last so translation
         * is (re-)enabled only after everything else is programmed */
        writel(smmu->translation_enable_0_0,
                smmu->regs + MC_SMMU_TRANSLATION_ENABLE_0_0);
        writel(smmu->translation_enable_1_0,
                smmu->regs + MC_SMMU_TRANSLATION_ENABLE_1_0);
        writel(smmu->translation_enable_2_0,
                smmu->regs + MC_SMMU_TRANSLATION_ENABLE_2_0);
        writel(smmu->asid_security_0, smmu->regs + MC_SMMU_ASID_SECURITY_0);
        writel(smmu->ptb_asid_0,      smmu->regs + MC_SMMU_PTB_ASID_0);
        writel(smmu->ptc_config_0,    smmu->regs + MC_SMMU_PTC_CONFIG_0);
        writel(smmu->tlb_config_0,    smmu->regs + MC_SMMU_TLB_CONFIG_0);
        writel(smmu->config_0,        smmu->regs + MC_SMMU_CONFIG_0);

        smmu_flush_regs(smmu);

        /* Tell the AHB arbiter that SMMU initialization is complete */
        writel(
                readl(smmu->regs_ahbarb + AHB_ARBITRATION_XBAR_CTRL_0) |
                (AHB_ARBITRATION_XBAR_CTRL_0_SMMU_INIT_DONE_DONE <<
                        AHB_ARBITRATION_XBAR_CTRL_0_SMMU_INIT_DONE_SHIFT),
                smmu->regs_ahbarb + AHB_ARBITRATION_XBAR_CTRL_0);
}
405
/*
 * Save the SMMU register image into the smmu_device softcopy so that
 * smmu_resume()/smmu_setup_regs() can restore it after a power cycle.
 * Always succeeds (returns 0).
 */
static int smmu_suspend(struct tegra_iovmm_device *dev)
{
        struct smmu_device *smmu =
                container_of(dev, struct smmu_device, iovmm_dev);

        smmu->config_0     = readl(smmu->regs + MC_SMMU_CONFIG_0);
        smmu->tlb_config_0 = readl(smmu->regs + MC_SMMU_TLB_CONFIG_0);
        smmu->ptc_config_0 = readl(smmu->regs + MC_SMMU_PTC_CONFIG_0);
        smmu->ptb_asid_0   = readl(smmu->regs + MC_SMMU_PTB_ASID_0);
        smmu->translation_enable_0_0 =
                readl(smmu->regs + MC_SMMU_TRANSLATION_ENABLE_0_0);
        smmu->translation_enable_1_0 =
                readl(smmu->regs + MC_SMMU_TRANSLATION_ENABLE_1_0);
        smmu->translation_enable_2_0 =
                readl(smmu->regs + MC_SMMU_TRANSLATION_ENABLE_2_0);
        smmu->asid_security_0 =
                readl(smmu->regs + MC_SMMU_ASID_SECURITY_0);
        return 0;
}
425
/*
 * Restore all SMMU registers from the image captured by smmu_suspend().
 * No-op unless the SMMU had been enabled before suspend.
 */
static void smmu_resume(struct tegra_iovmm_device *dev)
{
        struct smmu_device *smmu =
                container_of(dev, struct smmu_device, iovmm_dev);

        if (!smmu->enable)
                return;

        spin_lock(&smmu->lock);
        smmu_setup_regs(smmu);
        spin_unlock(&smmu->lock);
}
438
/*
 * Invalidate the SMMU's cached copies of one page-table entry:
 * first the PTC line holding "pte" (addressed by the physical address of
 * the entry within "ptpage"), then the TLB entries for "iova" under this
 * AS's ASID.  "is_pde" selects SECTION match (a PDE's 4MB span, per the
 * 0xffc00000 VA mask) instead of GROUP match (PTE granularity).
 */
static void flush_ptc_and_tlb(struct smmu_device *smmu,
                struct smmu_as *as, unsigned long iova,
                unsigned long *pte, struct page *ptpage, int is_pde)
{
        unsigned long tlb_flush_va = is_pde
                        ?  MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA(iova, SECTION)
                        :  MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA(iova, GROUP);

        writel(MC_SMMU_PTC_FLUSH_0_PTC_FLUSH_TYPE_ADR |
                VA_PAGE_TO_PA(pte, ptpage),
                smmu->regs + MC_SMMU_PTC_FLUSH_0);
        /* PTC flush must complete before the TLB flush is issued */
        FLUSH_SMMU_REGS(smmu);
        writel(tlb_flush_va |
                MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH__ENABLE |
                (as->asid << MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_SHIFT),
                smmu->regs + MC_SMMU_TLB_FLUSH_0);
        FLUSH_SMMU_REGS(smmu);
}
457
/*
 * Free the page table backing the 4MB span that contains "iova", if one
 * exists: reset the PDE to its vacant value, flush the PDE from the CPU
 * D-cache, and invalidate the SMMU's cached copies.
 */
static void free_ptbl(struct smmu_as *as, unsigned long iova)
{
        unsigned long pdn = SMMU_ADDR_TO_PDN(iova);
        unsigned long *pdir = (unsigned long *)kmap(as->pdir_page);

        if (pdir[pdn] != _PDE_VACANT(pdn)) {
                pr_debug("%s:%d pdn=%lx\n", __func__, __LINE__, pdn);

                ClearPageReserved(SMMU_EX_PTBL_PAGE(pdir[pdn]));
                __free_page(SMMU_EX_PTBL_PAGE(pdir[pdn]));
                pdir[pdn] = _PDE_VACANT(pdn);
                FLUSH_CPU_DCACHE(&pdir[pdn], as->pdir_page, sizeof pdir[pdn]);
                flush_ptc_and_tlb(as->smmu, as, iova, &pdir[pdn],
                                as->pdir_page, 1);
        }
        kunmap(as->pdir_page);
}
475
/*
 * Release an AS's page directory, every remaining page table, and the
 * PTE-use counter array.  Safe to call when no PDIR was allocated.
 *
 * NOTE(review): the loop runs page_count times while advancing addr by
 * one full 4MB page-table span per iteration; page_count appears to be
 * a page count (see struct smmu_device), which would make this iterate
 * far more times than there are PDEs — presumably intended to be
 * page_count / SMMU_PTBL_COUNT.  Extra passes only re-find vacant PDEs
 * (addr wraps at 4GB so pdn stays in range), but worth confirming.
 */
static void free_pdir(struct smmu_as *as)
{
        if (as->pdir_page) {
                unsigned addr = as->smmu->iovmm_base;
                int count = as->smmu->page_count;

                while (count-- > 0) {
                        free_ptbl(as, addr);
                        addr += SMMU_PAGE_SIZE * SMMU_PTBL_COUNT;
                }
                ClearPageReserved(as->pdir_page);
                __free_page(as->pdir_page);
                as->pdir_page = NULL;
                kfree(as->pte_count);
                as->pte_count = NULL;
        }
}
493
/*
 * Platform-driver removal: disable translation, free every AS's page
 * tables, release the dummy AVP page and MMIO mappings, unregister from
 * the IOVMM core, and free the device structure.
 */
static int smmu_remove(struct platform_device *pdev)
{
        struct smmu_device *smmu = platform_get_drvdata(pdev);

        if (!smmu)
                return 0;

        /* Turn translation off before tearing down page tables */
        if (smmu->enable) {
                writel(MC_SMMU_CONFIG_0_SMMU_ENABLE_DISABLE,
                        smmu->regs + MC_SMMU_CONFIG_0);
                smmu->enable = 0;
        }
        platform_set_drvdata(pdev, NULL);

        if (smmu->as) {
                int asid;

                for (asid = 0; asid < smmu->num_ases; asid++)
                        free_pdir(&smmu->as[asid]);
                kfree(smmu->as);
        }

        if (smmu->avp_vector_page)
                __free_page(smmu->avp_vector_page);
        if (smmu->regs)
                iounmap(smmu->regs);
        if (smmu->regs_ahbarb)
                iounmap(smmu->regs_ahbarb);
        tegra_iovmm_unregister(&smmu->iovmm_dev);
        kfree(smmu);
        return 0;
}
526
/*
 * Maps PTBL for given iova and returns the PTE address
 * Caller must unmap the mapped PTBL returned in *ptbl_page_p
 *
 * With allocate == true, a vacant PDE gets a newly allocated page table
 * whose PTEs are initialized to their vacant values; with false, NULL is
 * returned for a vacant PDE.  On success *pte_counter points at the
 * in-use PTE counter for the covering PDE.  The PDIR kmap is dropped
 * before returning; only the PTBL kmap is left for the caller.
 * Caller is expected to hold as->lock (callers take it before calling).
 */
static unsigned long *locate_pte(struct smmu_as *as,
                unsigned long iova, bool allocate,
                struct page **ptbl_page_p,
                unsigned int **pte_counter)
{
        unsigned long ptn = SMMU_ADDR_TO_PFN(iova);
        unsigned long pdn = SMMU_ADDR_TO_PDN(iova);
        unsigned long *pdir = kmap(as->pdir_page);
        unsigned long *ptbl;

        if (pdir[pdn] != _PDE_VACANT(pdn)) {
                /* Mapped entry table already exists */
                *ptbl_page_p = SMMU_EX_PTBL_PAGE(pdir[pdn]);
                ptbl = kmap(*ptbl_page_p);
        } else if (!allocate) {
                kunmap(as->pdir_page);
                return NULL;
        } else {
                /* Vacant - allocate a new page table */
                pr_debug("%s:%d new PTBL pdn=%lx\n", __func__, __LINE__, pdn);

                *ptbl_page_p = alloc_page(GFP_KERNEL | __GFP_DMA);
                if (!*ptbl_page_p) {
                        kunmap(as->pdir_page);
                        pr_err(DRIVER_NAME
                        ": failed to allocate tegra_iovmm_device page table\n");
                        return NULL;
                }
                SetPageReserved(*ptbl_page_p);
                ptbl = (unsigned long *)kmap(*ptbl_page_p);
                {
                        /* Initialize every PTE in the new table to the
                         * vacant value for its address */
                        int pn;
                        unsigned long addr = SMMU_PDN_TO_ADDR(pdn);
                        for (pn = 0; pn < SMMU_PTBL_COUNT;
                                pn++, addr += SMMU_PAGE_SIZE) {
                                ptbl[pn] = _PTE_VACANT(addr);
                        }
                }
                /* Flush the new table, then publish it via the PDE and
                 * invalidate the SMMU's cached PDE/section entries */
                FLUSH_CPU_DCACHE(ptbl, *ptbl_page_p, SMMU_PTBL_SIZE);
                pdir[pdn] = SMMU_MK_PDE(*ptbl_page_p,
                                as->pde_attr | _PDE_NEXT);
                FLUSH_CPU_DCACHE(&pdir[pdn], as->pdir_page, sizeof pdir[pdn]);
                flush_ptc_and_tlb(as->smmu, as, iova, &pdir[pdn],
                                as->pdir_page, 1);
        }
        *pte_counter = &as->pte_count[pdn];

        kunmap(as->pdir_page);
        return &ptbl[ptn % SMMU_PTBL_COUNT];
}
581
/*
 * Debugging aid: when the current task's pid matches the configured
 * signature_pid, stamp the start of the newly mapped physical page with
 * its IOVA and physical address so mappings can be recognized in dumps.
 */
static void put_signature(struct smmu_as *as,
                        unsigned long addr, unsigned long pfn)
{
        if (as->smmu->signature_pid == current->pid) {
                struct page *page = pfn_to_page(pfn);
                unsigned long *vaddr = kmap(page);
                if (vaddr) {
                        vaddr[0] = addr;
                        vaddr[1] = pfn << PAGE_SHIFT;
                        FLUSH_CPU_DCACHE(vaddr, page, sizeof(vaddr[0]) * 2);
                        kunmap(page);
                }
        }
}
596
/*
 * Map every page of an IOVMM area: for each 4KB page, make the backing
 * page resident via the area's ops, install its PTE, flush the CPU
 * D-cache and the SMMU's PTC/TLB.  On any failure, already-mapped pages
 * are unwound (PTE vacated, page released) and -ENOMEM is returned.
 */
static int smmu_map(struct tegra_iovmm_domain *domain,
                struct tegra_iovmm_area *iovma)
{
        struct smmu_as *as = container_of(domain, struct smmu_as, domain);
        unsigned long addr = iovma->iovm_start;
        unsigned long pcount = iovma->iovm_length >> SMMU_PAGE_SHIFT;
        int i;

        pr_debug("%s:%d iova=%lx asid=%d\n", __func__, __LINE__,
                 addr, as - as->smmu->as);

        for (i = 0; i < pcount; i++) {
                unsigned long pfn;
                unsigned long *pte;
                unsigned int *pte_counter;
                struct page *ptpage;

                pfn = iovma->ops->lock_makeresident(iovma, i << PAGE_SHIFT);
                if (!pfn_valid(pfn))
                        goto fail;

                mutex_lock(&as->lock);

                pte = locate_pte(as, addr, true, &ptpage, &pte_counter);
                if (!pte)
                        goto fail2;

                pr_debug("%s:%d iova=%lx pfn=%lx asid=%d\n",
                         __func__, __LINE__, addr, pfn, as - as->smmu->as);

                /* Track occupancy: increment when filling a vacant slot;
                 * the decrement below only fires if the new PTE value
                 * happens to equal the vacant pattern */
                if (*pte == _PTE_VACANT(addr))
                        (*pte_counter)++;
                *pte = SMMU_PFN_TO_PTE(pfn, as->pte_attr);
                if (unlikely((*pte == _PTE_VACANT(addr))))
                        (*pte_counter)--;
                FLUSH_CPU_DCACHE(pte, ptpage, sizeof *pte);
                flush_ptc_and_tlb(as->smmu, as, addr, pte, ptpage, 0);
                kunmap(ptpage);
                mutex_unlock(&as->lock);
                put_signature(as, addr, pfn);
                addr += SMMU_PAGE_SIZE;
        }
        return 0;

fail:
        mutex_lock(&as->lock);
fail2:
        /* Unwind pages 0..i-1: release each backing page, vacate its
         * PTE, and free page tables that drop to zero occupancy.
         * NOTE(review): on the fail2 path the page at index i was made
         * resident by lock_makeresident() but is never release()d here
         * (the loop starts at i-1) — looks like a leak; verify against
         * the iovma ops contract. */
        while (i-- > 0) {
                unsigned long *pte;
                unsigned int *pte_counter;
                struct page *page;

                iovma->ops->release(iovma, i<<PAGE_SHIFT);
                addr -= SMMU_PAGE_SIZE;
                pte = locate_pte(as, addr, false, &page, &pte_counter);
                if (pte) {
                        if (*pte != _PTE_VACANT(addr)) {
                                *pte = _PTE_VACANT(addr);
                                FLUSH_CPU_DCACHE(pte, page, sizeof *pte);
                                flush_ptc_and_tlb(as->smmu, as, addr, pte,
                                                page, 0);
                                kunmap(page);
                                if (!--(*pte_counter))
                                        free_ptbl(as, addr);
                        } else {
                                kunmap(page);
                        }
                }
        }
        mutex_unlock(&as->lock);
        return -ENOMEM;
}
670
/*
 * Unmap every page of an IOVMM area: release the backing page (when the
 * area provides a release op), vacate the PTE, flush CPU cache and
 * SMMU PTC/TLB.  When "decommit" is set, page tables whose occupancy
 * drops to zero are freed as well.
 */
static void smmu_unmap(struct tegra_iovmm_domain *domain,
        struct tegra_iovmm_area *iovma, bool decommit)
{
        struct smmu_as *as = container_of(domain, struct smmu_as, domain);
        unsigned long addr = iovma->iovm_start;
        unsigned int pcount = iovma->iovm_length >> SMMU_PAGE_SHIFT;
        unsigned int i, *pte_counter;

        pr_debug("%s:%d iova=%lx asid=%d\n", __func__, __LINE__,
                 addr, as - as->smmu->as);

        mutex_lock(&as->lock);
        for (i = 0; i < pcount; i++) {
                unsigned long *pte;
                struct page *page;

                if (iovma->ops && iovma->ops->release)
                        iovma->ops->release(iovma, i << PAGE_SHIFT);

                pte = locate_pte(as, addr, false, &page, &pte_counter);
                if (pte) {
                        if (*pte != _PTE_VACANT(addr)) {
                                *pte = _PTE_VACANT(addr);
                                FLUSH_CPU_DCACHE(pte, page, sizeof *pte);
                                flush_ptc_and_tlb(as->smmu, as, addr, pte,
                                                page, 0);
                                kunmap(page);
                                if (!--(*pte_counter) && decommit)
                                        free_ptbl(as, addr);
                        }
                }
                addr += SMMU_PAGE_SIZE;
        }
        mutex_unlock(&as->lock);
}
706
/*
 * Map a single pfn at IOVA "addr" in the area's address space: install
 * the PTE (allocating a page table if needed), flush CPU cache and the
 * SMMU's PTC/TLB.  BUGs on an invalid pfn.
 */
static void smmu_map_pfn(struct tegra_iovmm_domain *domain,
        struct tegra_iovmm_area *iovma, unsigned long addr,
        unsigned long pfn)
{
        struct smmu_as *as = container_of(domain, struct smmu_as, domain);
        struct smmu_device *smmu = as->smmu;
        unsigned long *pte;
        unsigned int *pte_counter;
        struct page *ptpage;

        pr_debug("%s:%d iova=%lx pfn=%lx asid=%d\n", __func__, __LINE__,
                 (unsigned long)addr, pfn, as - as->smmu->as);

        BUG_ON(!pfn_valid(pfn));
        mutex_lock(&as->lock);
        pte = locate_pte(as, addr, true, &ptpage, &pte_counter);
        if (pte) {
                /* Same occupancy bookkeeping as smmu_map() */
                if (*pte == _PTE_VACANT(addr))
                        (*pte_counter)++;
                *pte = SMMU_PFN_TO_PTE(pfn, as->pte_attr);
                if (unlikely((*pte == _PTE_VACANT(addr))))
                        (*pte_counter)--;
                FLUSH_CPU_DCACHE(pte, ptpage, sizeof *pte);
                flush_ptc_and_tlb(smmu, as, addr, pte, ptpage, 0);
                kunmap(ptpage);
                put_signature(as, addr, pfn);
        }
        mutex_unlock(&as->lock);
}
736
737 /*
738  * Caller must lock/unlock as
739  */
740 static int alloc_pdir(struct smmu_as *as)
741 {
742         unsigned long *pdir;
743         int pdn;
744
745         if (as->pdir_page)
746                 return 0;
747
748         as->pte_count = kzalloc(sizeof(as->pte_count[0]) * SMMU_PDIR_COUNT,
749                                 GFP_KERNEL);
750         if (!as->pte_count) {
751                 pr_err(DRIVER_NAME
752                 ": failed to allocate tegra_iovmm_device PTE cunters\n");
753                 return -ENOMEM;
754         }
755         as->pdir_page = alloc_page(GFP_KERNEL | __GFP_DMA);
756         if (!as->pdir_page) {
757                 pr_err(DRIVER_NAME
758                 ": failed to allocate tegra_iovmm_device page directory\n");
759                 kfree(as->pte_count);
760                 as->pte_count = NULL;
761                 return -ENOMEM;
762         }
763         SetPageReserved(as->pdir_page);
764         pdir = kmap(as->pdir_page);
765
766         for (pdn = 0; pdn < SMMU_PDIR_COUNT; pdn++)
767                 pdir[pdn] = _PDE_VACANT(pdn);
768         FLUSH_CPU_DCACHE(pdir, as->pdir_page, SMMU_PDIR_SIZE);
769         writel(MC_SMMU_PTC_FLUSH_0_PTC_FLUSH_TYPE_ADR |
770                 VA_PAGE_TO_PA(pdir, as->pdir_page),
771                 as->smmu->regs + MC_SMMU_PTC_FLUSH_0);
772         FLUSH_SMMU_REGS(as->smmu);
773         writel(MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_MATCH_ALL |
774                 MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH__ENABLE |
775                 (as->asid << MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_SHIFT),
776                 as->smmu->regs + MC_SMMU_TLB_FLUSH_0);
777         FLUSH_SMMU_REGS(as->smmu);
778         kunmap(as->pdir_page);
779
780         return 0;
781 }
782
783 static void _sysfs_create(struct smmu_as *as, struct device *sysfs_parent);
784
785 /*
786  * Allocate resources for an AS
787  *      TODO: split into "alloc" and "lock"
788  */
789 static struct tegra_iovmm_domain *smmu_alloc_domain(
790         struct tegra_iovmm_device *dev, struct tegra_iovmm_client *client)
791 {
792         struct smmu_device *smmu =
793                 container_of(dev, struct smmu_device, iovmm_dev);
794         struct smmu_as *as = NULL;
795         const struct domain_hwc_map *map = NULL;
796         int asid, i;
797
798         /* Look for a free AS */
799         for  (asid = smmu->lowest_asid; asid < smmu->num_ases; asid++) {
800                 mutex_lock(&smmu->as[asid].lock);
801                 if (!smmu->as[asid].hwclients) {
802                         as = &smmu->as[asid];
803                         break;
804                 }
805                 mutex_unlock(&smmu->as[asid].lock);
806         }
807
808         if (!as) {
809                 pr_err(DRIVER_NAME ": no free AS\n");
810                 return NULL;
811         }
812
813         if (alloc_pdir(as) < 0)
814                 goto bad3;
815
816         /* Look for a matching hardware client group */
817         for (i = 0; ARRAY_SIZE(smmu_hwc_map); i++) {
818                 if (!strcmp(smmu_hwc_map[i].dev_name, client->misc_dev->name)) {
819                         map = &smmu_hwc_map[i];
820                         break;
821                 }
822         }
823
824         if (!map) {
825                 pr_err(DRIVER_NAME ": no SMMU resource for %s (%s)\n",
826                         client->name, client->misc_dev->name);
827                 goto bad2;
828         }
829
830         spin_lock(&smmu->lock);
831         /* Update PDIR register */
832         writel(MC_SMMU_PTB_ASID_0_CURRENT_ASID(as->asid),
833                 as->smmu->regs + MC_SMMU_PTB_ASID_0);
834         writel(SMMU_MK_PDIR(as->pdir_page, as->pdir_attr),
835                 as->smmu->regs + MC_SMMU_PTB_DATA_0);
836         FLUSH_SMMU_REGS(smmu);
837
838         /* Put each hardware client in the group into the address space */
839         for (i = 0; i < map->nr_hwcs; i++) {
840                 struct smmu_hwc_state *hwcst = &smmu->hwc_state[map->hwcs[i]];
841
842                 /* Is the hardware client busy? */
843                 if (hwcst->enable_disable != SMMU_ASID_DISABLE &&
844                         hwcst->enable_disable != SMMU_ASID_ENABLE(as->asid)) {
845                         pr_err(DRIVER_NAME
846                                 ": HW 0x%lx busy for ASID %ld (client!=%s)\n",
847                                 hwcst->reg,
848                                 SMMU_ASID_ASID(hwcst->enable_disable),
849                                 client->name);
850                         goto bad;
851                 }
852                 hwcst->enable_disable = SMMU_ASID_ENABLE(as->asid);
853                 writel(hwcst->enable_disable, smmu->regs + hwcst->reg);
854         }
855         FLUSH_SMMU_REGS(smmu);
856         spin_unlock(&smmu->lock);
857         as->hwclients = map;
858         _sysfs_create(as, client->misc_dev->this_device);
859         mutex_unlock(&as->lock);
860
861         /* Reserve "page zero" for AVP vectors using a common dummy page */
862         smmu_map_pfn(&as->domain, NULL, 0,
863                 page_to_phys(as->smmu->avp_vector_page) >> SMMU_PAGE_SHIFT);
864         return &as->domain;
865
866 bad:
867         /* Reset hardware clients that have been enabled */
868         while (--i >= 0) {
869                 struct smmu_hwc_state *hwcst = &smmu->hwc_state[map->hwcs[i]];
870
871                 hwcst->enable_disable = SMMU_ASID_DISABLE;
872                 writel(hwcst->enable_disable, smmu->regs + hwcst->reg);
873         }
874         FLUSH_SMMU_REGS(smmu);
875         spin_unlock(&as->smmu->lock);
876 bad2:
877         free_pdir(as);
878 bad3:
879         mutex_unlock(&as->lock);
880         return NULL;
881
882 }
883
884 /*
885  * Release resources for an AS
886  *      TODO: split into "unlock" and "free"
887  */
888 static void smmu_free_domain(
889         struct tegra_iovmm_domain *domain, struct tegra_iovmm_client *client)
890 {
891         struct smmu_as *as = container_of(domain, struct smmu_as, domain);
892         struct smmu_device *smmu = as->smmu;
893         const struct domain_hwc_map *map = NULL;
894         int i;
895
896         mutex_lock(&as->lock);
897         map = as->hwclients;
898
899         spin_lock(&smmu->lock);
900         for (i = 0; i < map->nr_hwcs; i++) {
901                 struct smmu_hwc_state *hwcst = &smmu->hwc_state[map->hwcs[i]];
902
903                 hwcst->enable_disable = SMMU_ASID_DISABLE;
904                 writel(SMMU_ASID_DISABLE, smmu->regs + hwcst->reg);
905         }
906         FLUSH_SMMU_REGS(smmu);
907         spin_unlock(&smmu->lock);
908
909         as->hwclients = NULL;
910         if (as->pdir_page) {
911                 spin_lock(&smmu->lock);
912                 writel(MC_SMMU_PTB_ASID_0_CURRENT_ASID(as->asid),
913                         smmu->regs + MC_SMMU_PTB_ASID_0);
914                 writel(MC_SMMU_PTB_DATA_0_RESET_VAL,
915                         smmu->regs + MC_SMMU_PTB_DATA_0);
916                 FLUSH_SMMU_REGS(smmu);
917                 spin_unlock(&smmu->lock);
918
919                 free_pdir(as);
920         }
921         mutex_unlock(&as->lock);
922 }
923
/* tegra_iovmm backend operations implemented by this SMMU driver */
static struct tegra_iovmm_device_ops tegra_iovmm_smmu_ops = {
	.map = smmu_map,
	.unmap = smmu_unmap,
	.map_pfn = smmu_map_pfn,
	.alloc_domain = smmu_alloc_domain,
	.free_domain = smmu_free_domain,
	.suspend = smmu_suspend,
	.resume = smmu_resume,
};
933
934 static int smmu_probe(struct platform_device *pdev)
935 {
936         struct smmu_device *smmu;
937         struct resource *regs, *regs2;
938         struct tegra_smmu_window *window;
939         int e, asid;
940
941         BUILD_BUG_ON(PAGE_SHIFT != SMMU_PAGE_SHIFT);
942         BUILD_BUG_ON(ARRAY_SIZE(smmu_hwc_state_init) != HWC_COUNT);
943
944         regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mc");
945         regs2 = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ahbarb");
946         window = tegra_smmu_window(0);
947
948         if (!regs || !regs2 || !window) {
949                 pr_err(DRIVER_NAME ": No SMMU resources\n");
950                 return -ENODEV;
951         }
952
953         smmu = kzalloc(sizeof(*smmu), GFP_KERNEL);
954         if (!smmu) {
955                 pr_err(DRIVER_NAME ": failed to allocate smmu_device\n");
956                 return -ENOMEM;
957         }
958
959         smmu->num_ases = MC_SMMU_NUM_ASIDS;
960         smmu->iovmm_base = (tegra_iovmm_addr_t)window->start;
961         smmu->page_count = (window->end + 1 - window->start) >> SMMU_PAGE_SHIFT;
962         smmu->regs = ioremap(regs->start, regs->end + 1 - regs->start);
963         smmu->regs_ahbarb =
964                 ioremap(regs2->start, regs2->end + 1 - regs2->start);
965         if (!smmu->regs || !smmu->regs_ahbarb) {
966                 pr_err(DRIVER_NAME ": failed to remap SMMU registers\n");
967                 e = -ENXIO;
968                 goto fail;
969         }
970
971         smmu->config_0        = MC_SMMU_CONFIG_0_SMMU_ENABLE_ENABLE;
972         smmu->tlb_config_0    = MC_SMMU_TLB_CONFIG_0_RESET_VAL;
973         smmu->ptc_config_0    = MC_SMMU_PTC_CONFIG_0_RESET_VAL;
974         smmu->ptb_asid_0      = 0;
975         smmu->translation_enable_0_0 = ~0;
976         smmu->translation_enable_1_0 = ~0;
977         smmu->translation_enable_2_0 = ~0;
978         smmu->asid_security_0 = 0;
979
980         memcpy(smmu->hwc_state, smmu_hwc_state_init, sizeof(smmu->hwc_state));
981
982         smmu->iovmm_dev.name = VMM_NAME;
983         smmu->iovmm_dev.ops = &tegra_iovmm_smmu_ops;
984         smmu->iovmm_dev.pgsize_bits = SMMU_PAGE_SHIFT;
985
986         e = tegra_iovmm_register(&smmu->iovmm_dev);
987         if (e)
988                 goto fail;
989
990         smmu->as = kzalloc(sizeof(smmu->as[0]) * smmu->num_ases, GFP_KERNEL);
991         if (!smmu->as) {
992                 pr_err(DRIVER_NAME ": failed to allocate smmu_as\n");
993                 e = -ENOMEM;
994                 goto fail;
995         }
996
997         /* Initialize address space structure array */
998         for (asid = 0; asid < smmu->num_ases; asid++) {
999                 struct smmu_as *as = &smmu->as[asid];
1000
1001                 as->smmu = smmu;
1002                 as->asid = asid;
1003                 as->pdir_attr = _PDIR_ATTR;
1004                 as->pde_attr  = _PDE_ATTR;
1005                 as->pte_attr  = _PTE_ATTR;
1006
1007                 mutex_init(&as->lock);
1008
1009                 e = tegra_iovmm_domain_init(&as->domain, &smmu->iovmm_dev,
1010                         smmu->iovmm_base,
1011                         smmu->iovmm_base +
1012                                 (smmu->page_count << SMMU_PAGE_SHIFT));
1013                 if (e)
1014                         goto fail;
1015         }
1016         spin_lock_init(&smmu->lock);
1017         smmu_setup_regs(smmu);
1018         smmu->enable = 1;
1019         platform_set_drvdata(pdev, smmu);
1020
1021         smmu->avp_vector_page = alloc_page(GFP_KERNEL);
1022         if (!smmu->avp_vector_page)
1023                 goto fail;
1024         return 0;
1025
1026 fail:
1027         if (smmu->avp_vector_page)
1028                 __free_page(smmu->avp_vector_page);
1029         if (smmu->regs)
1030                 iounmap(smmu->regs);
1031         if (smmu->regs_ahbarb)
1032                 iounmap(smmu->regs_ahbarb);
1033         if (smmu && smmu->as) {
1034                 for (asid = 0; asid < smmu->num_ases; asid++) {
1035                         if (smmu->as[asid].pdir_page) {
1036                                 ClearPageReserved(smmu->as[asid].pdir_page);
1037                                 __free_page(smmu->as[asid].pdir_page);
1038                         }
1039                 }
1040                 kfree(smmu->as);
1041         }
1042         kfree(smmu);
1043         return e;
1044 }
1045
/* Platform driver glue; smmu_remove is defined elsewhere in this file */
static struct platform_driver tegra_iovmm_smmu_drv = {
	.probe = smmu_probe,
	.remove = smmu_remove,
	.driver = {
		.name = DRIVER_NAME,
	},
};
1053
/* Module init: register the platform driver */
static int __devinit smmu_init(void)
{
	return platform_driver_register(&tegra_iovmm_smmu_drv);
}

static void __exit smmu_exit(void)
{
	platform_driver_unregister(&tegra_iovmm_smmu_drv);
}

/* subsys_initcall: bring the IOVMM up before client drivers probe */
subsys_initcall(smmu_init);
module_exit(smmu_exit);
1066
1067 /*
1068  * SMMU-global sysfs interface for debugging
1069  */
1070 static ssize_t _sysfs_show_reg(struct device *d,
1071                                 struct device_attribute *da, char *buf);
1072 static ssize_t _sysfs_store_reg(struct device *d,
1073                                 struct device_attribute *da, const char *buf,
1074                                 size_t count);
1075
/*
 * _NAME_MAP(reg): build a {name, register offset, sysfs attribute}
 * triple so the register can be read/written through sysfs via
 * _sysfs_show_reg/_sysfs_store_reg.
 */
#define _NAME_MAP(_name)        {       \
	.name = __stringify(_name),     \
	.offset = _name##_0,            \
	.dev_attr = __ATTR(_name, S_IRUGO | S_IWUSR,    \
			_sysfs_show_reg, _sysfs_store_reg)      \
}

/* Table of all SMMU registers exposed through sysfs */
static
struct _reg_name_map {
	const char *name;
	unsigned        offset;
	struct device_attribute dev_attr;
} _smmu_reg_name_map[] = {
	_NAME_MAP(MC_SMMU_CONFIG),
	_NAME_MAP(MC_SMMU_TLB_CONFIG),
	_NAME_MAP(MC_SMMU_PTC_CONFIG),
	_NAME_MAP(MC_SMMU_PTB_ASID),
	_NAME_MAP(MC_SMMU_PTB_DATA),
	_NAME_MAP(MC_SMMU_TLB_FLUSH),
	_NAME_MAP(MC_SMMU_PTC_FLUSH),
	_NAME_MAP(MC_SMMU_ASID_SECURITY),
	_NAME_MAP(MC_SMMU_STATS_TLB_HIT_COUNT),
	_NAME_MAP(MC_SMMU_STATS_TLB_MISS_COUNT),
	_NAME_MAP(MC_SMMU_STATS_PTC_HIT_COUNT),
	_NAME_MAP(MC_SMMU_STATS_PTC_MISS_COUNT),
	_NAME_MAP(MC_SMMU_TRANSLATION_ENABLE_0),
	_NAME_MAP(MC_SMMU_TRANSLATION_ENABLE_1),
	_NAME_MAP(MC_SMMU_TRANSLATION_ENABLE_2),
	_NAME_MAP(MC_SMMU_AFI_ASID),
	_NAME_MAP(MC_SMMU_AVPC_ASID),
	_NAME_MAP(MC_SMMU_DC_ASID),
	_NAME_MAP(MC_SMMU_DCB_ASID),
	_NAME_MAP(MC_SMMU_EPP_ASID),
	_NAME_MAP(MC_SMMU_G2_ASID),
	_NAME_MAP(MC_SMMU_HC_ASID),
	_NAME_MAP(MC_SMMU_HDA_ASID),
	_NAME_MAP(MC_SMMU_ISP_ASID),
	_NAME_MAP(MC_SMMU_MPE_ASID),
	_NAME_MAP(MC_SMMU_NV_ASID),
	_NAME_MAP(MC_SMMU_NV2_ASID),
	_NAME_MAP(MC_SMMU_PPCS_ASID),
	_NAME_MAP(MC_SMMU_SATA_ASID),
	_NAME_MAP(MC_SMMU_VDE_ASID),
	_NAME_MAP(MC_SMMU_VI_ASID),
};
1121
1122 static ssize_t lookup_reg(struct device_attribute *da)
1123 {
1124         int i;
1125         for (i = 0; i < ARRAY_SIZE(_smmu_reg_name_map); i++) {
1126                 if (!strcmp(_smmu_reg_name_map[i].name, da->attr.name))
1127                         return _smmu_reg_name_map[i].offset;
1128         }
1129         return -ENODEV;
1130 }
1131
1132 static ssize_t _sysfs_show_reg(struct device *d,
1133                                         struct device_attribute *da, char *buf)
1134 {
1135         struct smmu_device *smmu =
1136                 container_of(d, struct smmu_device, sysfs_dev);
1137         ssize_t offset = lookup_reg(da);
1138
1139         if (offset < 0)
1140                 return offset;
1141         return sprintf(buf, "%08lx\n",
1142                 (unsigned long)readl(smmu->regs + offset));
1143 }
1144
/*
 * sysfs "store" handler for a raw SMMU register: parse the input as
 * hex and write it to the register mapped to this attribute.  Unless
 * CONFIG_TEGRA_IOVMM_SMMU_SYSFS is set, only the TLB/PTC stats-enable
 * bits may actually be modified.
 */
static ssize_t _sysfs_store_reg(struct device *d,
			struct device_attribute *da,
			const char *buf, size_t count)
{
	struct smmu_device *smmu =
		container_of(d, struct smmu_device, sysfs_dev);
	ssize_t offset = lookup_reg(da);
	u32 value;
	int err;

	if (offset < 0)
		return offset;

	/* Input is hexadecimal (base 16) */
	err = kstrtou32(buf, 16, &value);
	if (err)
		return err;

#ifdef CONFIG_TEGRA_IOVMM_SMMU_SYSFS
	writel(value, smmu->regs + offset);
#else
	/* Allow writing to reg only for TLB/PTC stats enabling/disabling */
	{
		unsigned long mask = 0;
		switch (offset) {
		case MC_SMMU_TLB_CONFIG_0:
			mask = MC_SMMU_TLB_CONFIG_0_TLB_STATS__MASK;
			break;
		case MC_SMMU_PTC_CONFIG_0:
			mask = MC_SMMU_PTC_CONFIG_0_PTC_STATS__MASK;
			break;
		default:
			break;
		}

		/* Read-modify-write only the permitted bits; writes to
		 * other registers are silently ignored (mask == 0) */
		if (mask) {
			unsigned long currval = readl(smmu->regs + offset);
			currval &= ~mask;
			value &= mask;
			value |= currval;
			writel(value, smmu->regs + offset);
		}
	}
#endif
	return count;
}
1190
1191 static ssize_t _sysfs_show_smmu(struct device *d,
1192                                 struct device_attribute *da, char *buf)
1193 {
1194         struct smmu_device *smmu =
1195                 container_of(d, struct smmu_device, sysfs_dev);
1196         ssize_t rv = 0;
1197
1198         rv += sprintf(buf + rv , "      regs: %p\n", smmu->regs);
1199         rv += sprintf(buf + rv , "iovmm_base: %p\n", (void *)smmu->iovmm_base);
1200         rv += sprintf(buf + rv , "page_count: %lx\n", smmu->page_count);
1201         rv += sprintf(buf + rv , "  num_ases: %d\n", smmu->num_ases);
1202         rv += sprintf(buf + rv , "        as: %p\n", smmu->as);
1203         rv += sprintf(buf + rv , "    enable: %s\n",
1204                         smmu->enable ? "yes" : "no");
1205         return rv;
1206 }
1207
/* Read-only attribute dumping overall SMMU state (_sysfs_show_smmu) */
static struct device_attribute _attr_show_smmu
		 = __ATTR(show_smmu, S_IRUGO, _sysfs_show_smmu, NULL);

/*
 * _SYSFS_SHOW_VALUE(name, field, fmt): generate a sysfs "show"
 * handler that prints smmu->field using printf format fmt.
 */
#define _SYSFS_SHOW_VALUE(name, field, fmt)             \
static ssize_t _sysfs_show_##name(struct device *d,     \
	struct device_attribute *da, char *buf)         \
{                                                       \
	struct smmu_device *smmu =                      \
		container_of(d, struct smmu_device, sysfs_dev); \
	ssize_t rv = 0;                                 \
	rv += sprintf(buf + rv, fmt "\n", smmu->field); \
	return rv;                                      \
}
1221
/* Callback placeholder for setters that need no post-set action */
static void (*_sysfs_null_callback)(struct smmu_device *, unsigned long *) =
	NULL;

/*
 * _SYSFS_SET_VALUE_DO(name, field, base, ceil, callback): generate a
 * sysfs "store" handler that parses the input in the given base,
 * stores it into smmu->field if below ceil, and invokes callback (if
 * non-NULL) with a pointer to the updated field.
 * Note: value is u32, so the "0 <= value" half of the range check is
 * always true; only the upper bound filters.
 */
#define _SYSFS_SET_VALUE_DO(name, field, base, ceil, callback)  \
static ssize_t _sysfs_set_##name(struct device *d,              \
		struct device_attribute *da, const char *buf, size_t count) \
{                                                               \
	int err;                                                \
	u32 value;                                              \
	struct smmu_device *smmu =                              \
		container_of(d, struct smmu_device, sysfs_dev); \
	err = kstrtou32(buf, base, &value);                     \
	if (err)                                                \
		return err;                                     \
	if (0 <= value && value < ceil) {                       \
		smmu->field = value;                            \
		if (callback)                                   \
			callback(smmu, &smmu->field);           \
	}                                                       \
	return count;                                           \
}
#ifdef CONFIG_TEGRA_IOVMM_SMMU_SYSFS
#define _SYSFS_SET_VALUE        _SYSFS_SET_VALUE_DO
#else
/* Without CONFIG_TEGRA_IOVMM_SMMU_SYSFS, setters are accepted but
 * discarded (no-op that swallows the input) */
#define _SYSFS_SET_VALUE(name, field, base, ceil, callback)     \
static ssize_t _sysfs_set_##name(struct device *d,              \
		struct device_attribute *da, const char *buf, size_t count) \
{                                                               \
	return count;                                           \
}
#endif
1253
/* lowest_asid: first ASID considered by smmu_alloc_domain (decimal) */
_SYSFS_SHOW_VALUE(lowest_asid, lowest_asid, "%lu")
_SYSFS_SET_VALUE(lowest_asid, lowest_asid, 10,
		MC_SMMU_NUM_ASIDS, _sysfs_null_callback)
/* debug_asid: ASID targeted by the pdir/pde/pte attr knobs below */
_SYSFS_SHOW_VALUE(debug_asid, debug_asid, "%lu")
_SYSFS_SET_VALUE(debug_asid, debug_asid, 10,
		MC_SMMU_NUM_ASIDS, _sysfs_null_callback)
/* signature_pid: always writable (uses _DO variant, bypassing the
 * CONFIG_TEGRA_IOVMM_SMMU_SYSFS no-op stub) */
_SYSFS_SHOW_VALUE(signature_pid, signature_pid, "%lu")
_SYSFS_SET_VALUE_DO(signature_pid, signature_pid, 10, PID_MAX_LIMIT + 1,
		_sysfs_null_callback)
1263
#ifdef CONFIG_TEGRA_IOVMM_SMMU_SYSFS
/* Clamp a written attribute value to the legal attribute bits */
static void _sysfs_mask_attr(struct smmu_device *smmu, unsigned long *field)
{
	*field &= _MASK_ATTR;
}

/*
 * Clamp the value, then push the new attribute bits into the PTB_DATA
 * register of the currently selected debug ASID.
 */
static void _sysfs_mask_pdir_attr(struct smmu_device *smmu,
	unsigned long *field)
{
	unsigned long pdir;

	_sysfs_mask_attr(smmu, field);
	writel(MC_SMMU_PTB_ASID_0_CURRENT_ASID(smmu->debug_asid),
		smmu->regs + MC_SMMU_PTB_ASID_0);
	pdir = readl(smmu->regs + MC_SMMU_PTB_DATA_0);
	pdir &= ~_MASK_ATTR;
	pdir |= *field;
	writel(pdir, smmu->regs + MC_SMMU_PTB_DATA_0);
	FLUSH_SMMU_REGS(smmu);
}

/* Function-pointer aliases passed as setter callbacks below */
static void (*_sysfs_mask_attr_callback)(struct smmu_device *,
				unsigned long *field) = &_sysfs_mask_attr;
static void (*_sysfs_mask_pdir_attr_callback)(struct smmu_device *,
				unsigned long *field) = &_sysfs_mask_pdir_attr;
#endif
1290
/* pdir/pde/pte attribute knobs for the AS selected by debug_asid (hex);
 * the callbacks above mask the value and (for pdir) update hardware */
_SYSFS_SHOW_VALUE(pdir_attr, as[smmu->debug_asid].pdir_attr, "%lx")
_SYSFS_SET_VALUE(pdir_attr, as[smmu->debug_asid].pdir_attr, 16,
		_PDIR_ATTR + 1, _sysfs_mask_pdir_attr_callback)
_SYSFS_SHOW_VALUE(pde_attr, as[smmu->debug_asid].pde_attr, "%lx")
_SYSFS_SET_VALUE(pde_attr, as[smmu->debug_asid].pde_attr, 16,
		_PDE_ATTR + 1, _sysfs_mask_attr_callback)
_SYSFS_SHOW_VALUE(pte_attr, as[smmu->debug_asid].pte_attr, "%lx")
_SYSFS_SET_VALUE(pte_attr, as[smmu->debug_asid].pte_attr, 16,
		_PTE_ATTR + 1, _sysfs_mask_attr_callback)
1300
/* Value attributes generated by the _SYSFS_SHOW/SET_VALUE macros above */
static struct device_attribute _attr_values[] = {
	__ATTR(lowest_asid, S_IRUGO | S_IWUSR,
		_sysfs_show_lowest_asid, _sysfs_set_lowest_asid),
	__ATTR(debug_asid, S_IRUGO | S_IWUSR,
		_sysfs_show_debug_asid, _sysfs_set_debug_asid),
	__ATTR(signature_pid, S_IRUGO | S_IWUSR,
		_sysfs_show_signature_pid, _sysfs_set_signature_pid),

	__ATTR(pdir_attr, S_IRUGO | S_IWUSR,
		_sysfs_show_pdir_attr, _sysfs_set_pdir_attr),
	__ATTR(pde_attr, S_IRUGO | S_IWUSR,
		_sysfs_show_pde_attr, _sysfs_set_pde_attr),
	__ATTR(pte_attr, S_IRUGO | S_IWUSR,
		_sysfs_show_pte_attr, _sysfs_set_pte_attr),
};
1316
/* NULL-terminated attribute array filled in by _sysfs_smmu():
 * register attrs + value attrs + show_smmu + terminator; the "+ 3"
 * leaves one spare slot beyond the required "+ 2" */
static struct attribute *_smmu_attrs[
	ARRAY_SIZE(_smmu_reg_name_map) + ARRAY_SIZE(_attr_values) + 3];
static struct attribute_group _smmu_attr_group = {
	.attrs = _smmu_attrs
};
1322
1323 static void _sysfs_smmu(struct smmu_device *smmu, struct device *parent)
1324 {
1325         int i, j;
1326
1327         if (smmu->sysfs_use_count++ > 0)
1328                 return;
1329         for (i = 0; i < ARRAY_SIZE(_smmu_reg_name_map); i++)
1330                 _smmu_attrs[i] = &_smmu_reg_name_map[i].dev_attr.attr;
1331         for (j = 0; j < ARRAY_SIZE(_attr_values); j++)
1332                 _smmu_attrs[i++] = &_attr_values[j].attr;
1333         _smmu_attrs[i++] = &_attr_show_smmu.attr;
1334         _smmu_attrs[i] = NULL;
1335
1336         dev_set_name(&smmu->sysfs_dev, "smmu");
1337         smmu->sysfs_dev.parent = parent;
1338         smmu->sysfs_dev.driver = NULL;
1339         smmu->sysfs_dev.release = NULL;
1340         if (device_register(&smmu->sysfs_dev)) {
1341                 pr_err("%s: failed to register smmu_sysfs_dev\n", __func__);
1342                 smmu->sysfs_use_count--;
1343                 return;
1344         }
1345         if (sysfs_create_group(&smmu->sysfs_dev.kobj, &_smmu_attr_group)) {
1346                 pr_err("%s: failed to create group for smmu_sysfs_dev\n",
1347                         __func__);
1348                 smmu->sysfs_use_count--;
1349                 return;
1350         }
1351 }
1352
/* Per-AS sysfs hook; currently only (use-counted) SMMU-global nodes */
static void _sysfs_create(struct smmu_as *as, struct device *parent)
{
	_sysfs_smmu(as->smmu, parent);
}