]> nv-tegra.nvidia Code Review - linux-2.6.git/blob - arch/arm/mach-tegra/iovmm-smmu.c
ARM: tegra11: clock: Use tabulated EMC clock register
[linux-2.6.git] / arch / arm / mach-tegra / iovmm-smmu.c
1 /*
2  * arch/arm/mach-tegra/iovmm-smmu.c
3  *
4  * Tegra I/O VMM implementation for SMMU devices for Tegra 3 series
5  * SoCs and later.
6  *
7  * Copyright (c) 2010-2012, NVIDIA CORPORATION.  All rights reserved.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; either version 2 of the License, or
12  * (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful, but WITHOUT
15  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
17  * more details.
18  *
19  * You should have received a copy of the GNU General Public License along
20  * with this program; if not, write to the Free Software Foundation, Inc.,
21  * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
22  */
23
24 #include <linux/module.h>
25 #include <linux/platform_device.h>
26 #include <linux/spinlock.h>
27 #include <linux/slab.h>
28 #include <linux/vmalloc.h>
29 #include <linux/mm.h>
30 #include <linux/pagemap.h>
31 #include <linux/sysfs.h>
32 #include <linux/device.h>
33 #include <linux/sched.h>
34 #include <linux/io.h>
35 #include <linux/random.h>
36 #include <linux/ctype.h>
37 #include <linux/debugfs.h>
38 #include <linux/seq_file.h>
39
40 #include <asm/page.h>
41 #include <asm/cacheflush.h>
42
43 #include <mach/iovmm.h>
44 #include <mach/iomap.h>
45 #include <mach/tegra_smmu.h>
46
47 /*
48  * Macros without __ copied from armc.h
49  */
50 #define MC_INTSTATUS_0                                  0x0
51 #define MC_ERR_STATUS_0                                 0x8
52 #define MC_ERR_ADR_0                                    0xc
53
54 #define MC_SMMU_CONFIG_0                                0x10
55 #define MC_SMMU_CONFIG_0_SMMU_ENABLE_DISABLE            0
56 #define MC_SMMU_CONFIG_0_SMMU_ENABLE_ENABLE             1
57
58 #define MC_SMMU_TLB_CONFIG_0                            0x14
59 #define MC_SMMU_TLB_CONFIG_0_TLB_STATS_ENABLE__MASK     (1 << 31)
60 #define MC_SMMU_TLB_CONFIG_0_TLB_STATS_ENABLE           (1 << 31)
61 #define MC_SMMU_TLB_CONFIG_0_TLB_STATS_TEST__MASK       (1 << 30)
62 #define MC_SMMU_TLB_CONFIG_0_TLB_STATS_TEST             (1 << 30)
63 #ifdef CONFIG_ARCH_TEGRA_3x_SOC
64 #define MC_SMMU_TLB_CONFIG_0_TLB_ACTIVE_LINES__VALUE    0x10
65 #define MC_SMMU_TLB_CONFIG_0_RESET_VAL                  0x20000010
66 #else
67 #define MC_SMMU_TLB_CONFIG_0_TLB_ACTIVE_LINES__VALUE    0x20
68 #define MC_SMMU_TLB_CONFIG_0_RESET_VAL                  0x20000020
69 #endif
70
71 #define MC_SMMU_PTC_CONFIG_0                            0x18
72 #define MC_SMMU_PTC_CONFIG_0_PTC_STATS_ENABLE__MASK     (1 << 31)
73 #define MC_SMMU_PTC_CONFIG_0_PTC_STATS_ENABLE           (1 << 31)
74 #define MC_SMMU_PTC_CONFIG_0_PTC_STATS_TEST__MASK       (1 << 30)
75 #define MC_SMMU_PTC_CONFIG_0_PTC_STATS_TEST             (1 << 30)
76 #define MC_SMMU_PTC_CONFIG_0_PTC_INDEX_MAP__PATTERN     0x3f
77 #define MC_SMMU_PTC_CONFIG_0_RESET_VAL                  0x2000003f
78
79 #define MC_SMMU_STATS_CONFIG_MASK               \
80         MC_SMMU_PTC_CONFIG_0_PTC_STATS_ENABLE__MASK
81 #define MC_SMMU_STATS_CONFIG_ENABLE             \
82         MC_SMMU_PTC_CONFIG_0_PTC_STATS_ENABLE
83 #define MC_SMMU_STATS_CONFIG_TEST               \
84         MC_SMMU_PTC_CONFIG_0_PTC_STATS_TEST
85
86 #define MC_SMMU_PTB_ASID_0                              0x1c
87 #define MC_SMMU_PTB_ASID_0_CURRENT_ASID_SHIFT           0
88
89 #define MC_SMMU_PTB_DATA_0                              0x20
90 #define MC_SMMU_PTB_DATA_0_RESET_VAL                    0
91 #define MC_SMMU_PTB_DATA_0_ASID_NONSECURE_SHIFT         29
92 #define MC_SMMU_PTB_DATA_0_ASID_WRITABLE_SHIFT          30
93 #define MC_SMMU_PTB_DATA_0_ASID_READABLE_SHIFT          31
94
95 #define MC_SMMU_TLB_FLUSH_0                             0x30
96 #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_MATCH_ALL              0
97 #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_MATCH_SECTION          2
98 #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_MATCH_GROUP            3
99 #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_SHIFT                29
100 #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_DISABLE        0
101 #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_ENABLE         1
102 #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_SHIFT          31
103
104 #define MC_SMMU_PTC_FLUSH_0                             0x34
105 #define MC_SMMU_PTC_FLUSH_0_PTC_FLUSH_TYPE_ALL          0
106 #define MC_SMMU_PTC_FLUSH_0_PTC_FLUSH_TYPE_ADR          1
107 #define MC_SMMU_PTC_FLUSH_0_PTC_FLUSH_ADR_SHIFT         4
108
109 #define MC_SMMU_ASID_SECURITY_0                         0x38
110 #define MC_EMEM_CFG_0                                   0x50
111 #define MC_SECURITY_CFG0_0                              0x70
112 #define MC_SECURITY_CFG1_0                              0x74
113 #define MC_SECURITY_CFG2_0                              0x78
114 #define MC_SECURITY_RSV_0                               0x7c
115
116 #define MC_SMMU_STATS_TLB_HIT_COUNT_0                   0x1f0
117 #define MC_SMMU_STATS_TLB_MISS_COUNT_0                  0x1f4
118 #define MC_SMMU_STATS_PTC_HIT_COUNT_0                   0x1f8
119 #define MC_SMMU_STATS_PTC_MISS_COUNT_0                  0x1fc
120
121 #define MC_SMMU_TRANSLATION_ENABLE_0_0                  0x228
122 #define MC_SMMU_TRANSLATION_ENABLE_1_0                  0x22c
123 #define MC_SMMU_TRANSLATION_ENABLE_2_0                  0x230
124
125 #define MC_SMMU_AFI_ASID_0              0x238   /* PCIE (T30) */
126 #define MC_SMMU_AVPC_ASID_0             0x23c   /* AVP */
127 #define MC_SMMU_DC_ASID_0               0x240   /* Display controller */
128 #define MC_SMMU_DCB_ASID_0              0x244   /* Display controller B */
129 #define MC_SMMU_EPP_ASID_0              0x248   /* Encoder pre-processor */
130 #define MC_SMMU_G2_ASID_0               0x24c   /* 2D engine */
131 #define MC_SMMU_HC_ASID_0               0x250   /* Host1x */
132 #define MC_SMMU_HDA_ASID_0              0x254   /* High-def audio */
133 #define MC_SMMU_ISP_ASID_0              0x258   /* Image signal processor */
134 #define MC_SMMU_MPE_ASID_0              0x264   /* MPEG encoder (T30) */
135 #define MC_SMMU_MSENC_ASID_0            0x264   /* MPEG encoder (T11x) */
136 #define MC_SMMU_NV_ASID_0               0x268   /* 3D */
137 #define MC_SMMU_NV2_ASID_0              0x26c   /* 3D secondary (T30) */
138 #define MC_SMMU_PPCS_ASID_0             0x270   /* AHB */
139 #define MC_SMMU_SATA_ASID_0             0x278   /* SATA (T30) */
140 #define MC_SMMU_VDE_ASID_0              0x27c   /* Video decoder */
141 #define MC_SMMU_VI_ASID_0               0x280   /* Video input */
142 #define MC_SMMU_XUSB_HOST_ASID_0        0x288   /* USB host (T11x) */
143 #define MC_SMMU_XUSB_DEV_ASID_0         0x28c   /* USB dev (T11x) */
144 #define MC_SMMU_TSEC_ASID_0             0x294   /* TSEC (T11x) */
145 #define MC_SMMU_PPCS1_ASID_0            0x298   /* AHB secondary (T11x) */
146
147 /*
148  * Tegra11x
149  */
150 #define MC_STAT_CONTROL_0                       0x100
151 #define MC_STAT_EMC_CLOCKS_0                    0x110
152 #define MC_STAT_EMC_FILTER_SET0_ADR_LIMIT_LO_0  0x118
153 #define MC_STAT_EMC_FILTER_SET0_ADR_LIMIT_HI_0  0x11c
154 #define MC_STAT_EMC_FILTER_SET0_CLIENT_0_0      0x128
155 #define MC_STAT_EMC_FILTER_SET0_CLIENT_1_0      0x12c
156 #define MC_STAT_EMC_FILTER_SET0_CLIENT_2_0      0x130
157 #define MC_STAT_EMC_SET0_COUNT_0                0x138
158 #define MC_STAT_EMC_SET0_COUNT_MSBS_0           0x13c
159 #define MC_STAT_EMC_FILTER_SET1_ADR_LIMIT_LO_0  0x158
160 #define MC_STAT_EMC_FILTER_SET1_ADR_LIMIT_HI_0  0x15c
161 #define MC_STAT_EMC_FILTER_SET1_CLIENT_0_0      0x168
162 #define MC_STAT_EMC_FILTER_SET1_CLIENT_1_0      0x16c
163 #define MC_STAT_EMC_FILTER_SET1_CLIENT_2_0      0x170
164 #define MC_STAT_EMC_SET1_COUNT_0                0x178
165 #define MC_STAT_EMC_SET1_COUNT_MSBS_0           0x17c
166 #define MC_STAT_EMC_FILTER_SET0_VIRTUAL_ADR_LIMIT_LO_0  0x198
167 #define MC_STAT_EMC_FILTER_SET0_VIRTUAL_ADR_LIMIT_HI_0  0x19c
168 #define MC_STAT_EMC_FILTER_SET0_ASID_0          0x1a0
169 #define MC_STAT_EMC_FILTER_SET1_VIRTUAL_ADR_LIMIT_LO_0  0x1a8
170 #define MC_STAT_EMC_FILTER_SET1_VIRTUAL_ADR_LIMIT_HI_0  0x1ac
171 #define MC_STAT_EMC_FILTER_SET1_ASID_0          0x1b0
172
173 /*
174  * Copied from arahb_arbc.h
175  */
176 #ifndef CONFIG_ARCH_TEGRA_3x_SOC
177 #define AHB_MASTER_SWID_0               0x18
178 #endif
179 #define AHB_ARBITRATION_XBAR_CTRL_0     0xe0
180 #define AHB_ARBITRATION_XBAR_CTRL_0_SMMU_INIT_DONE_DONE         1
181 #define AHB_ARBITRATION_XBAR_CTRL_0_SMMU_INIT_DONE_SHIFT        17
182
183 /*
184  * Copied from arapbdma.h
185  */
186 #ifndef CONFIG_ARCH_TEGRA_3x_SOC
187 #define APBDMA_CHANNEL_SWID_0           0x3c
188 #endif
189
190 #define MC_SMMU_NUM_ASIDS       4
191 #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_SECTION__MASK          0xffc00000
192 #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_SECTION__SHIFT 12 /* right shift */
193 #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_GROUP__MASK            0xffffc000
194 #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_GROUP__SHIFT   12 /* right shift */
195 #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA(iova, which)   \
196         ((((iova) & MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_##which##__MASK) >> \
197                 MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_##which##__SHIFT) |    \
198         MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_MATCH_##which)
199 #define MC_SMMU_PTB_ASID_0_CURRENT_ASID(n)      \
200                 ((n) << MC_SMMU_PTB_ASID_0_CURRENT_ASID_SHIFT)
201 #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_disable                \
202                 (MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_DISABLE <<    \
203                         MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_SHIFT)
204 #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH__ENABLE                \
205                 (MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_ENABLE <<     \
206                         MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_SHIFT)
207
208 #define VMM_NAME "iovmm-smmu"
209 #define DRIVER_NAME "tegra_smmu"
210
211 #define SMMU_PAGE_SHIFT 12
212 #define SMMU_PAGE_SIZE  (1 << SMMU_PAGE_SHIFT)
213
214 #define SMMU_PDIR_COUNT 1024
215 #define SMMU_PDIR_SIZE  (sizeof(unsigned long) * SMMU_PDIR_COUNT)
216 #define SMMU_PTBL_COUNT 1024
217 #define SMMU_PTBL_SIZE  (sizeof(unsigned long) * SMMU_PTBL_COUNT)
218 #define SMMU_PDIR_SHIFT 12
219 #define SMMU_PDE_SHIFT  12
220 #define SMMU_PTE_SHIFT  12
221 #define SMMU_PFN_MASK   0x000fffff
222
223 #define SMMU_PDE_NEXT_SHIFT             28
224
225 #define SMMU_ADDR_TO_PFN(addr)  ((addr) >> 12)
226 #define SMMU_ADDR_TO_PDN(addr)  ((addr) >> 22)
227 #define SMMU_PDN_TO_ADDR(pdn)   ((pdn) << 22)
228
229 #define _READABLE       (1 << MC_SMMU_PTB_DATA_0_ASID_READABLE_SHIFT)
230 #define _WRITABLE       (1 << MC_SMMU_PTB_DATA_0_ASID_WRITABLE_SHIFT)
231 #define _NONSECURE      (1 << MC_SMMU_PTB_DATA_0_ASID_NONSECURE_SHIFT)
232 #define _PDE_NEXT       (1 << SMMU_PDE_NEXT_SHIFT)
233 #define _MASK_ATTR      (_READABLE | _WRITABLE | _NONSECURE)
234
235 #define _PDIR_ATTR      (_READABLE | _WRITABLE | _NONSECURE)
236
237 #define _PDE_ATTR       (_READABLE | _WRITABLE | _NONSECURE)
238 #define _PDE_ATTR_N     (_PDE_ATTR | _PDE_NEXT)
239 #define _PDE_VACANT(pdn)        (((pdn) << 10) | _PDE_ATTR)
240
241 #define _PTE_ATTR       (_READABLE | _WRITABLE | _NONSECURE)
242 #define _PTE_VACANT(addr)       (((addr) >> SMMU_PAGE_SHIFT) | _PTE_ATTR)
243
244 #define SMMU_MK_PDIR(page, attr)        \
245                 ((page_to_phys(page) >> SMMU_PDIR_SHIFT) | (attr))
246 #define SMMU_MK_PDE(page, attr)         \
247                 (unsigned long)((page_to_phys(page) >> SMMU_PDE_SHIFT) | (attr))
248 #define SMMU_EX_PTBL_PAGE(pde)          \
249                 pfn_to_page((unsigned long)(pde) & SMMU_PFN_MASK)
250 #define SMMU_PFN_TO_PTE(pfn, attr)      (unsigned long)((pfn) | (attr))
251
252 #define SMMU_ASID_ENABLE(asid)  ((asid) | (1 << 31))
253 #define SMMU_ASID_DISABLE       0
254 #define SMMU_ASID_ASID(n)       ((n) & ~SMMU_ASID_ENABLE(0))
255
256 #ifdef CONFIG_ARCH_TEGRA_3x_SOC
257 #define SMMU_HWC        \
258         op(AFI)         \
259         op(AVPC)        \
260         op(DC)          \
261         op(DCB)         \
262         op(EPP)         \
263         op(G2)          \
264         op(HC)          \
265         op(HDA)         \
266         op(ISP)         \
267         op(MPE)         \
268         op(NV)          \
269         op(NV2)         \
270         op(PPCS)        \
271         op(SATA)        \
272         op(VDE)         \
273         op(VI)
274 #endif
275
276 #ifdef CONFIG_ARCH_TEGRA_11x_SOC
277 #define SMMU_HWC        \
278         op(AVPC)        \
279         op(DC)          \
280         op(DCB)         \
281         op(EPP)         \
282         op(G2)          \
283         op(HC)          \
284         op(HDA)         \
285         op(ISP)         \
286         op(MSENC)       \
287         op(NV)          \
288         op(PPCS)        \
289         op(PPCS1)       \
290         op(TSEC)        \
291         op(VDE)         \
292         op(VI)          \
293         op(XUSB_DEV)    \
294         op(XUSB_HOST)
295 #endif
296
/*
 * Hardware-client IDs, expanded from the per-SoC SMMU_HWC list above.
 * Keep this as a "natural" enumeration (no assignments) so that
 * HWC_COUNT automatically equals the number of clients.
 */
enum smmu_hwclient {
#define op(c)   HWC_##c,
	SMMU_HWC
#undef op
	HWC_COUNT
};
304
305 struct smmu_hwc_state {
306         unsigned long reg;
307         unsigned long enable_disable;
308 };
309
310 /* Hardware client mapping initializer */
311 #define HWC_INIT(client)        \
312         [HWC_##client] = {MC_SMMU_##client##_ASID_0, SMMU_ASID_DISABLE},
313
314 static const struct smmu_hwc_state smmu_hwc_state_init[] = {
315 #define op(c)   HWC_INIT(c)
316         SMMU_HWC
317 #undef op
318 };
319
320
321 struct domain_hwc_map {
322         const char *dev_name;
323         const enum smmu_hwclient *hwcs;
324         const unsigned int nr_hwcs;
325 };
326
327 /* Enable all hardware clients for SMMU translation */
328 static const enum smmu_hwclient nvmap_hwcs[] = {
329 #define op(c)   HWC_##c,
330         SMMU_HWC
331 #undef op
332 };
333
334 static const struct domain_hwc_map smmu_hwc_map[] = {
335         {
336                 .dev_name = "nvmap",
337                 .hwcs = nvmap_hwcs,
338                 .nr_hwcs = ARRAY_SIZE(nvmap_hwcs),
339         },
340 };
341
/*
 * Per address space (one instance per SMMU ASID)
 */
struct smmu_as {
	struct smmu_device	*smmu;	/* back pointer to container */
	unsigned int		asid;	/* hardware ASID owned by this AS */
	const struct domain_hwc_map	*hwclients; /* HW clients using this AS */
	struct mutex	lock;	/* for pagetable */
	struct tegra_iovmm_domain domain;	/* iovmm-facing domain handle */
	struct page	*pdir_page;	/* one-page page directory, or NULL */
	unsigned long	pdir_attr;	/* attribute bits for MC_SMMU_PTB_DATA */
	unsigned long	pde_attr;	/* attribute bits for new PDEs */
	unsigned long	pte_attr;	/* attribute bits for new PTEs */
	unsigned int	*pte_count;	/* per-PTBL usage counters (freed in free_pdir) */
	struct device	sysfs_dev;
	int		sysfs_use_count;
};
359
360 /*
361  * Register bank index
362  */
363 enum {
364         _MC,
365 #ifdef TEGRA_MC0_BASE
366         _MC0,
367 #endif
368 #ifdef TEGRA_MC1_BASE
369         _MC1,
370 #endif
371         _AHBARB,
372         _APBDMA,
373         _REGS,
374 };
375
/* Physical base/size of each register bank, indexed by the enum above */
static const struct {
	unsigned long base;
	size_t size;
} tegra_reg[_REGS] = {
	[_MC]	= {TEGRA_MC_BASE, TEGRA_MC_SIZE},
#ifdef TEGRA_MC0_BASE
	[_MC0]	= {TEGRA_MC0_BASE, TEGRA_MC0_SIZE},
#endif
#ifdef TEGRA_MC1_BASE
	[_MC1]	= {TEGRA_MC1_BASE, TEGRA_MC1_SIZE},
#endif
	[_AHBARB]	= {TEGRA_AHB_ARB_BASE, TEGRA_AHB_ARB_SIZE},
	[_APBDMA]	= {TEGRA_APB_DMA_BASE, TEGRA_APB_DMA_SIZE},
};
390
391 /*
392  * Aliases for register bank base addres holders (remapped)
393  */
394 #define regs_mc         regs[_MC]
395 #define regs_mc0        regs[_MC0]
396 #define regs_mc1        regs[_MC1]
397 #define regs_ahbarb     regs[_AHBARB]
398 #define regs_apbdma     regs[_APBDMA]
399
/*
 * Per SMMU device
 */
struct smmu_device {
	void __iomem	*regs[_REGS];	/* remapped bank bases; see regs_* aliases */
	tegra_iovmm_addr_t	iovmm_base;	/* remappable base address */
	unsigned long	page_count;		/* total remappable size */
	spinlock_t	lock;		/* serializes register programming (see smmu_resume) */
	char		*name;
	struct tegra_iovmm_device iovmm_dev;
	int		num_ases;
	struct smmu_as	*as;			/* Run-time allocated array */
	struct smmu_hwc_state	hwc_state[HWC_COUNT];	/* per-client ASID reg images */
	struct device	sysfs_dev;
	int		sysfs_use_count;
	bool		enable;		/* true while SMMU translation is on */
#ifdef CONFIG_ARCH_TEGRA_3x_SOC
	struct page *avp_vector_page;	/* dummy page shared by all AS's */
#endif
	/*
	 * Register image savers for suspend/resume
	 */
	unsigned long translation_enable_0_0;
	unsigned long translation_enable_1_0;
	unsigned long translation_enable_2_0;
	unsigned long asid_security_0;

	unsigned long lowest_asid;	/* Variables for hardware testing */
	unsigned long debug_asid;
	unsigned long signature_pid;	/* For debugging aid */
	unsigned long challenge_code;	/* For debugging aid */
	unsigned long challenge_pid;	/* For debugging aid */

	struct device *dev;
	struct dentry *debugfs_root;
};
436
437 #define VA_PAGE_TO_PA(va, page) \
438         (page_to_phys(page) + ((unsigned long)(va) & ~PAGE_MASK))
439
440 static inline void
441 flush_cpu_dcache(void *va, struct page *page, size_t size)
442 {
443         unsigned long _pa_ = VA_PAGE_TO_PA((unsigned long)va, page);
444         __cpuc_flush_dcache_area((void *)(va), (size_t)(size));
445         outer_flush_range(_pa_, _pa_+(size_t)(size));
446 }
447
448 /*
449  * Any interaction between any block on PPSB and a block on APB or AHB
450  * must have these read-back to ensure the APB/AHB bus transaction is
451  * complete before initiating activity on the PPSB block.
452  */
static inline void flush_smmu_regs(struct smmu_device *smmu)
{
	/* Dummy read-back: forces the preceding MC writes to complete */
	(void)readl((smmu)->regs_mc + MC_SMMU_CONFIG_0);
}
457
458 /*
459  * Flush all TLB entries and all PTC entries
460  * Caller must lock smmu
461  */
462 static void smmu_flush_regs(struct smmu_device *smmu, int enable)
463 {
464         writel(MC_SMMU_PTC_FLUSH_0_PTC_FLUSH_TYPE_ALL,
465                 smmu->regs_mc + MC_SMMU_PTC_FLUSH_0);
466         flush_smmu_regs(smmu);
467         writel(MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_MATCH_ALL |
468                         MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_disable,
469                         smmu->regs_mc + MC_SMMU_TLB_FLUSH_0);
470
471         if (enable)
472                 writel(MC_SMMU_CONFIG_0_SMMU_ENABLE_ENABLE,
473                                 smmu->regs_mc + MC_SMMU_CONFIG_0);
474
475         flush_smmu_regs(smmu);
476 }
477
/*
 * Program (or restore after resume) the full SMMU register state:
 * per-AS page-directory pointers, per-client ASID enables, translation
 * enables, cache configs; then flush/enable and signal init-done to the
 * AHB arbiter.  Called under smmu->lock on the resume path.
 */
static void smmu_setup_regs(struct smmu_device *smmu)
{
	int i;

	if (smmu->as) {
		int asid;

		/* Set/restore page directory for each AS */
		for (asid = 0; asid < smmu->num_ases; asid++) {
			struct smmu_as *as = &smmu->as[asid];

			/* Select the ASID, then write its PTB pointer */
			writel(MC_SMMU_PTB_ASID_0_CURRENT_ASID(as->asid),
				smmu->regs_mc + MC_SMMU_PTB_ASID_0);
			writel(as->pdir_page
				? SMMU_MK_PDIR(as->pdir_page, as->pdir_attr)
				: MC_SMMU_PTB_DATA_0_RESET_VAL,
				smmu->regs_mc + MC_SMMU_PTB_DATA_0);
		}
	}

	/* Set/restore ASID for each hardware client */
	for (i = 0; i < HWC_COUNT; i++) {
		struct smmu_hwc_state *hwcst = &smmu->hwc_state[i];
		writel(hwcst->enable_disable, smmu->regs_mc + hwcst->reg);
	}

	/* Restore translation-enable bitmaps and ASID security bits */
	writel(smmu->translation_enable_0_0,
		smmu->regs_mc + MC_SMMU_TRANSLATION_ENABLE_0_0);
	writel(smmu->translation_enable_1_0,
		smmu->regs_mc + MC_SMMU_TRANSLATION_ENABLE_1_0);
	writel(smmu->translation_enable_2_0,
		smmu->regs_mc + MC_SMMU_TRANSLATION_ENABLE_2_0);
	writel(smmu->asid_security_0,
		smmu->regs_mc + MC_SMMU_ASID_SECURITY_0);
	writel(MC_SMMU_TLB_CONFIG_0_RESET_VAL,
		smmu->regs_mc + MC_SMMU_TLB_CONFIG_0);
	writel(MC_SMMU_PTC_CONFIG_0_RESET_VAL,
		smmu->regs_mc + MC_SMMU_PTC_CONFIG_0);

	/* Flush caches and turn translation on */
	smmu_flush_regs(smmu, 1);

	/* Tell the AHB arbiter that SMMU initialization is complete */
	writel(
		readl(smmu->regs_ahbarb + AHB_ARBITRATION_XBAR_CTRL_0) |
		(AHB_ARBITRATION_XBAR_CTRL_0_SMMU_INIT_DONE_DONE <<
			AHB_ARBITRATION_XBAR_CTRL_0_SMMU_INIT_DONE_SHIFT),
		smmu->regs_ahbarb + AHB_ARBITRATION_XBAR_CTRL_0);
}
524
525 static int smmu_suspend(struct tegra_iovmm_device *dev)
526 {
527         struct smmu_device *smmu =
528                 container_of(dev, struct smmu_device, iovmm_dev);
529
530         smmu->translation_enable_0_0 =
531                 readl(smmu->regs_mc + MC_SMMU_TRANSLATION_ENABLE_0_0);
532         smmu->translation_enable_1_0 =
533                 readl(smmu->regs_mc + MC_SMMU_TRANSLATION_ENABLE_1_0);
534         smmu->translation_enable_2_0 =
535                 readl(smmu->regs_mc + MC_SMMU_TRANSLATION_ENABLE_2_0);
536         smmu->asid_security_0 =
537                 readl(smmu->regs_mc + MC_SMMU_ASID_SECURITY_0);
538         return 0;
539 }
540
541 static void smmu_resume(struct tegra_iovmm_device *dev)
542 {
543         struct smmu_device *smmu =
544                 container_of(dev, struct smmu_device, iovmm_dev);
545
546         if (!smmu->enable)
547                 return;
548
549         spin_lock(&smmu->lock);
550         smmu_setup_regs(smmu);
551         spin_unlock(&smmu->lock);
552 }
553
/*
 * Invalidate the PTC line caching the given PTE/PDE word and the TLB
 * entries covering the affected VA range for this AS.
 *
 * @iova:   virtual address whose mapping changed
 * @pte:    kernel VA of the updated PTE/PDE word
 * @ptpage: page containing @pte (used to derive its physical address)
 * @is_pde: non-zero when @pte is a PDE, so a whole SECTION (4MB span,
 *          per the SECTION mask) is flushed instead of a GROUP
 */
static void flush_ptc_and_tlb(struct smmu_device *smmu,
		struct smmu_as *as, unsigned long iova,
		unsigned long *pte, struct page *ptpage, int is_pde)
{
	unsigned long tlb_flush_va = is_pde
			?  MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA(iova, SECTION)
			:  MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA(iova, GROUP);

	/* Flush the PTC line holding the physical address of the PTE */
	writel(MC_SMMU_PTC_FLUSH_0_PTC_FLUSH_TYPE_ADR |
		VA_PAGE_TO_PA(pte, ptpage),
		smmu->regs_mc + MC_SMMU_PTC_FLUSH_0);
	flush_smmu_regs(smmu);
	/* Then the TLB entries for the VA range, matched to our ASID */
	writel(tlb_flush_va |
		MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH__ENABLE |
		(as->asid << MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_SHIFT),
		smmu->regs_mc + MC_SMMU_TLB_FLUSH_0);
	flush_smmu_regs(smmu);
}
572
/*
 * Free the page table attached to the PDE covering iova (if any) and
 * revert that PDE to its vacant value, keeping CPU caches and the SMMU
 * PTC/TLB coherent.  NOTE(review): presumably called under as->lock —
 * confirm against callers outside this chunk.
 */
static void free_ptbl(struct smmu_as *as, unsigned long iova)
{
	unsigned long pdn = SMMU_ADDR_TO_PDN(iova);
	unsigned long *pdir = (unsigned long *)kmap(as->pdir_page);

	if (pdir[pdn] != _PDE_VACANT(pdn)) {
		pr_debug("%s:%d pdn=%lx\n", __func__, __LINE__, pdn);

		ClearPageReserved(SMMU_EX_PTBL_PAGE(pdir[pdn]));
		__free_page(SMMU_EX_PTBL_PAGE(pdir[pdn]));
		pdir[pdn] = _PDE_VACANT(pdn);
		/* Push the PDE update to memory, then invalidate HW caches */
		flush_cpu_dcache(&pdir[pdn], as->pdir_page, sizeof pdir[pdn]);
		flush_ptc_and_tlb(as->smmu, as, iova, &pdir[pdn],
				as->pdir_page, 1);
	}
	kunmap(as->pdir_page);
}
590
/*
 * Release an AS's page directory, every page table still attached to
 * it, and the PTE usage counters.
 *
 * NOTE(review): the loop runs page_count times but advances addr by a
 * full page-table span (SMMU_PAGE_SIZE * SMMU_PTBL_COUNT = 4MB) each
 * pass; that walks far past the remappable window unless page_count is
 * actually a page-table count — confirm against the probe code.
 */
static void free_pdir(struct smmu_as *as)
{
	if (as->pdir_page) {
		unsigned addr = as->smmu->iovmm_base;
		int count = as->smmu->page_count;

		while (count-- > 0) {
			free_ptbl(as, addr);
			addr += SMMU_PAGE_SIZE * SMMU_PTBL_COUNT;
		}
		ClearPageReserved(as->pdir_page);
		__free_page(as->pdir_page);
		as->pdir_page = NULL;
		kfree(as->pte_count);
		as->pte_count = NULL;
	}
}
608
/* debugfs directory names, one per memory-controller register bank */
static const char * const smmu_debugfs_mc[] = {
	"mc",
#ifdef TEGRA_MC0_BASE
	"mc0",
#endif
#ifdef TEGRA_MC1_BASE
	"mc1",
#endif
};

/* debugfs leaf names: one stats file per hardware cache */
static const char * const smmu_debugfs_cache[] = {  "tlb", "ptc", };
620
621 static ssize_t smmu_debugfs_stats_write(struct file *file,
622                                         const char __user *buffer,
623                                         size_t count, loff_t *pos)
624 {
625         struct inode *inode;
626         struct dentry *cache, *mc, *root;
627         struct smmu_device *smmu;
628         int mc_idx, cache_idx, i;
629         u32 offs, val;
630         const char * const smmu_debugfs_stats_ctl[] = { "off", "on", "reset"};
631         char str[] = "reset";
632
633         count = min_t(size_t, count, sizeof(str));
634         if (copy_from_user(str, buffer, count))
635                 return -EINVAL;
636
637         for (i = 0; i < ARRAY_SIZE(smmu_debugfs_stats_ctl); i++)
638                 if (strncmp(str, smmu_debugfs_stats_ctl[i],
639                             strlen(smmu_debugfs_stats_ctl[i])) == 0)
640                         break;
641
642         if (i == ARRAY_SIZE(smmu_debugfs_stats_ctl))
643                 return -EINVAL;
644
645         cache = file->f_dentry;
646         inode = cache->d_inode;
647         cache_idx = (int)inode->i_private;
648         mc = cache->d_parent;
649         mc_idx = (int)mc->d_inode->i_private;
650         root = mc->d_parent;
651         smmu = root->d_inode->i_private;
652
653         offs = MC_SMMU_TLB_CONFIG_0;
654         offs += sizeof(u32) * cache_idx;
655         offs += 2 * sizeof(u32) * ARRAY_SIZE(smmu_debugfs_cache) * mc_idx;
656
657         val = readl(smmu->regs + offs);
658         switch (i) {
659         case 0:
660                 val &= ~MC_SMMU_STATS_CONFIG_ENABLE;
661                 val &= ~MC_SMMU_STATS_CONFIG_TEST;
662                 writel(val, smmu->regs + offs);
663                 break;
664         case 1:
665                 val |= MC_SMMU_STATS_CONFIG_ENABLE;
666                 val &= ~MC_SMMU_STATS_CONFIG_TEST;
667                 writel(val, smmu->regs + offs);
668                 break;
669         case 2:
670                 val |= MC_SMMU_STATS_CONFIG_TEST;
671                 writel(val, smmu->regs + offs);
672                 val &= ~MC_SMMU_STATS_CONFIG_TEST;
673                 writel(val, smmu->regs + offs);
674                 break;
675         default:
676                 BUG();
677                 break;
678         }
679
680         pr_debug("%s() %08x, %08x @%08x\n", __func__,
681                  val, readl(smmu->regs + offs), offs);
682
683         return count;
684 }
685
686 static int smmu_debugfs_stats_show(struct seq_file *s, void *v)
687 {
688         struct inode *inode;
689         struct dentry *cache, *mc, *root;
690         struct smmu_device *smmu;
691         int mc_idx, cache_idx, i;
692         u32 offs;
693         const char * const smmu_debugfs_stats[] = { "hit", "miss", };
694
695         inode = s->private;
696
697         cache = d_find_alias(inode);
698         cache_idx = (int)inode->i_private;
699         mc = cache->d_parent;
700         mc_idx = (int)mc->d_inode->i_private;
701         root = mc->d_parent;
702         smmu = root->d_inode->i_private;
703
704         offs = MC_SMMU_STATS_TLB_HIT_COUNT_0;
705         offs += ARRAY_SIZE(smmu_debugfs_stats) * sizeof(u32) * cache_idx;
706         offs += ARRAY_SIZE(smmu_debugfs_stats) * sizeof(u32) *
707                 ARRAY_SIZE(smmu_debugfs_cache) * mc_idx;
708
709         for (i = 0; i < ARRAY_SIZE(smmu_debugfs_stats); i++) {
710                 u32 val;
711
712                 offs += sizeof(u32) * i;
713                 val = readl(smmu->regs + offs);
714
715                 seq_printf(s, "%s:%08x ", smmu_debugfs_stats[i], val);
716
717                 pr_debug("%s() %s %08x @%08x\n", __func__,
718                          smmu_debugfs_stats[i], val, offs);
719         }
720         seq_printf(s, "\n");
721
722         return 0;
723 }
724
/* Hand the inode to the show routine so it can recover bank/cache indices */
static int smmu_debugfs_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, smmu_debugfs_stats_show, inode);
}
729
/* Stats file ops: seq_file single_open read; write controls the counters */
static const struct file_operations smmu_debugfs_stats_fops = {
	.open		= smmu_debugfs_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= smmu_debugfs_stats_write,
};
737
/* Remove the whole tree built by smmu_debugfs_create() */
static void smmu_debugfs_delete(struct smmu_device *smmu)
{
	debugfs_remove_recursive(smmu->debugfs_root);
}
742
/*
 * Build the debugfs tree: smmu/<mc>/{tlb,ptc}.  Directories are made
 * with debugfs_create_file() + S_IFDIR rather than debugfs_create_dir()
 * so each directory inode's i_private can carry the bank index that the
 * stats handlers read back (debugfs_create_dir takes no data pointer).
 * On any failure the partially built tree is removed.
 */
static void smmu_debugfs_create(struct smmu_device *smmu)
{
	int i;
	struct dentry *root;

	/* Root dir carries the smmu pointer in its inode's i_private */
	root = debugfs_create_file("smmu",
				   S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
				   NULL, smmu, NULL);
	if (!root)
		goto err_out;
	smmu->debugfs_root = root;

	for (i = 0; i < ARRAY_SIZE(smmu_debugfs_mc); i++) {
		int j;
		struct dentry *mc;

		/* Bank index i stashed in the directory inode */
		mc = debugfs_create_file(smmu_debugfs_mc[i],
					 S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
					 root, (void *)i, NULL);
		if (!mc)
			goto err_out;

		for (j = 0; j < ARRAY_SIZE(smmu_debugfs_cache); j++) {
			struct dentry *cache;

			/* Cache index j stashed in the file inode */
			cache = debugfs_create_file(smmu_debugfs_cache[j],
						    S_IWUGO | S_IRUGO, mc,
						    (void *)j,
						    &smmu_debugfs_stats_fops);
			if (!cache)
				goto err_out;
		}
	}

	return;

err_out:
	smmu_debugfs_delete(smmu);
}
782
/*
 * Platform-driver teardown: remove debugfs, disable translation, free
 * every AS's page tables, release the AVP vector page (T30 only),
 * unregister from iovmm and unmap all register banks.
 */
static int smmu_remove(struct platform_device *pdev)
{
	struct smmu_device *smmu = platform_get_drvdata(pdev);
	int i;

	if (!smmu)
		return 0;

	smmu_debugfs_delete(smmu);

	if (smmu->enable) {
		/* Turn translation off before tearing the tables down */
		writel(MC_SMMU_CONFIG_0_SMMU_ENABLE_DISABLE,
			smmu->regs_mc + MC_SMMU_CONFIG_0);
		smmu->enable = 0;
	}
	platform_set_drvdata(pdev, NULL);

	if (smmu->as) {
		int asid;

		for (asid = 0; asid < smmu->num_ases; asid++)
			free_pdir(&smmu->as[asid]);
		kfree(smmu->as);
	}

#ifdef CONFIG_ARCH_TEGRA_3x_SOC
	if (smmu->avp_vector_page)
		__free_page(smmu->avp_vector_page);
#endif
	tegra_iovmm_unregister(&smmu->iovmm_dev);
	/* Unmap every bank that was successfully ioremapped at probe */
	for (i = 0; i < _REGS; i++) {
		if (smmu->regs[i]) {
			iounmap(smmu->regs[i]);
			smmu->regs[i] = NULL;
		}
	}
	kfree(smmu);
	return 0;
}
822
823 /*
824  * Maps PTBL for given iova and returns the PTE address
825  * Caller must unmap the mapped PTBL returned in *ptbl_page_p
826  */
827 static unsigned long *locate_pte(struct smmu_as *as,
828                 unsigned long iova, bool allocate,
829                 struct page **ptbl_page_p,
830                 unsigned int **pte_counter)
831 {
832         unsigned long ptn = SMMU_ADDR_TO_PFN(iova);
833         unsigned long pdn = SMMU_ADDR_TO_PDN(iova);
834         unsigned long *pdir = kmap(as->pdir_page);
835         unsigned long *ptbl;
836
837         if (pdir[pdn] != _PDE_VACANT(pdn)) {
838                 /* Mapped entry table already exists */
839                 *ptbl_page_p = SMMU_EX_PTBL_PAGE(pdir[pdn]);
840                 ptbl = kmap(*ptbl_page_p);
841         } else if (!allocate) {
842                 kunmap(as->pdir_page);
843                 return NULL;
844         } else {
845                 /* Vacant - allocate a new page table */
846                 pr_debug("%s:%d new PTBL pdn=%lx\n", __func__, __LINE__, pdn);
847
848                 *ptbl_page_p = alloc_page(GFP_KERNEL | __GFP_DMA);
849                 if (!*ptbl_page_p) {
850                         kunmap(as->pdir_page);
851                         pr_err(DRIVER_NAME
852                         ": failed to allocate tegra_iovmm_device page table\n");
853                         return NULL;
854                 }
855                 SetPageReserved(*ptbl_page_p);
856                 ptbl = (unsigned long *)kmap(*ptbl_page_p);
857                 {
858                         int pn;
859                         unsigned long addr = SMMU_PDN_TO_ADDR(pdn);
860                         for (pn = 0; pn < SMMU_PTBL_COUNT;
861                                 pn++, addr += SMMU_PAGE_SIZE) {
862                                 ptbl[pn] = _PTE_VACANT(addr);
863                         }
864                 }
865                 flush_cpu_dcache(ptbl, *ptbl_page_p, SMMU_PTBL_SIZE);
866                 pdir[pdn] = SMMU_MK_PDE(*ptbl_page_p,
867                                 as->pde_attr | _PDE_NEXT);
868                 flush_cpu_dcache(&pdir[pdn], as->pdir_page, sizeof pdir[pdn]);
869                 flush_ptc_and_tlb(as->smmu, as, iova, &pdir[pdn],
870                                 as->pdir_page, 1);
871         }
872         *pte_counter = &as->pte_count[pdn];
873
874         kunmap(as->pdir_page);
875         return &ptbl[ptn % SMMU_PTBL_COUNT];
876 }
877
878 static void put_signature(struct smmu_as *as,
879                         unsigned long addr, unsigned long pfn)
880 {
881         if (as->smmu->signature_pid == current->pid) {
882                 struct page *page = pfn_to_page(pfn);
883                 unsigned long *vaddr = kmap(page);
884                 if (vaddr) {
885                         vaddr[0] = addr;
886                         vaddr[1] = pfn << PAGE_SHIFT;
887                         flush_cpu_dcache(vaddr, page, sizeof(vaddr[0]) * 2);
888                         kunmap(page);
889                 }
890         }
891 }
892
/*
 * Map an entire IOVM area: for each page, pin the backing pfn via the
 * client's lock_makeresident() callback and install a PTE.  On any
 * failure the already-installed PTEs are torn down, pinned pages are
 * released, and -ENOMEM is returned.
 */
static int smmu_map(struct tegra_iovmm_domain *domain,
                struct tegra_iovmm_area *iovma)
{
        struct smmu_as *as = container_of(domain, struct smmu_as, domain);
        unsigned long addr = iovma->iovm_start;
        unsigned long pcount = iovma->iovm_length >> SMMU_PAGE_SHIFT;
        int i;

        pr_debug("%s:%d iova=%lx asid=%d\n", __func__, __LINE__,
                 addr, as - as->smmu->as);

        for (i = 0; i < pcount; i++) {
                unsigned long pfn;
                unsigned long *pte;
                unsigned int *pte_counter;
                struct page *ptpage;

                /* Pin the backing page and obtain its pfn */
                pfn = iovma->ops->lock_makeresident(iovma, i << PAGE_SHIFT);
                if (!pfn_valid(pfn))
                        goto fail;

                mutex_lock(&as->lock);

                /* locate_pte() leaves ptpage kmap()ed on success */
                pte = locate_pte(as, addr, true, &ptpage, &pte_counter);
                if (!pte)
                        goto fail2;

                pr_debug("%s:%d iova=%lx pfn=%lx asid=%d\n",
                         __func__, __LINE__, addr, pfn, as - as->smmu->as);

                /* Maintain the count of valid PTEs in this page table */
                if (*pte == _PTE_VACANT(addr))
                        (*pte_counter)++;
                *pte = SMMU_PFN_TO_PTE(pfn, as->pte_attr);
                if (unlikely((*pte == _PTE_VACANT(addr))))
                        (*pte_counter)--;
                flush_cpu_dcache(pte, ptpage, sizeof *pte);
                flush_ptc_and_tlb(as->smmu, as, addr, pte, ptpage, 0);
                kunmap(ptpage);
                mutex_unlock(&as->lock);
                put_signature(as, addr, pfn);
                addr += SMMU_PAGE_SIZE;
        }
        return 0;

fail:
        mutex_lock(&as->lock);
fail2:
        /*
         * Unwind: release pages and clear PTEs for indices i-1 .. 0.
         * NOTE(review): on the fail2 path the page pinned by
         * lock_makeresident() for the *current* index i is not
         * released here (the loop post-decrements before releasing) --
         * confirm whether that callback needs balancing in this case.
         */
        while (i-- > 0) {
                unsigned long *pte;
                unsigned int *pte_counter;
                struct page *page;

                iovma->ops->release(iovma, i<<PAGE_SHIFT);
                addr -= SMMU_PAGE_SIZE;
                pte = locate_pte(as, addr, false, &page, &pte_counter);
                if (pte) {
                        if (*pte != _PTE_VACANT(addr)) {
                                *pte = _PTE_VACANT(addr);
                                flush_cpu_dcache(pte, page, sizeof *pte);
                                flush_ptc_and_tlb(as->smmu, as, addr, pte,
                                                page, 0);
                                kunmap(page);
                                /* Free the page table once empty */
                                if (!--(*pte_counter))
                                        free_ptbl(as, addr);
                        } else {
                                kunmap(page);
                        }
                }
        }
        mutex_unlock(&as->lock);
        return -ENOMEM;
}
965
966 static void smmu_unmap(struct tegra_iovmm_domain *domain,
967         struct tegra_iovmm_area *iovma, bool decommit)
968 {
969         struct smmu_as *as = container_of(domain, struct smmu_as, domain);
970         unsigned long addr = iovma->iovm_start;
971         unsigned int pcount = iovma->iovm_length >> SMMU_PAGE_SHIFT;
972         unsigned int i, *pte_counter;
973
974         pr_debug("%s:%d iova=%lx asid=%d\n", __func__, __LINE__,
975                  addr, as - as->smmu->as);
976
977         mutex_lock(&as->lock);
978         for (i = 0; i < pcount; i++) {
979                 unsigned long *pte;
980                 struct page *page;
981
982                 if (iovma->ops && iovma->ops->release)
983                         iovma->ops->release(iovma, i << PAGE_SHIFT);
984
985                 pte = locate_pte(as, addr, false, &page, &pte_counter);
986                 if (pte) {
987                         if (*pte != _PTE_VACANT(addr)) {
988                                 *pte = _PTE_VACANT(addr);
989                                 flush_cpu_dcache(pte, page, sizeof *pte);
990                                 flush_ptc_and_tlb(as->smmu, as, addr, pte,
991                                                 page, 0);
992                                 kunmap(page);
993                                 if (!--(*pte_counter) && decommit) {
994                                         free_ptbl(as, addr);
995                                         smmu_flush_regs(as->smmu, 0);
996                                 }
997                         }
998                 }
999                 addr += SMMU_PAGE_SIZE;
1000         }
1001         mutex_unlock(&as->lock);
1002 }
1003
/*
 * Map a single pfn at the given IOVA (used e.g. to back the AVP
 * "page zero" with a dummy page).  The pfn must be valid (BUG_ON
 * otherwise); @iovma may be NULL and is unused here.
 */
static void smmu_map_pfn(struct tegra_iovmm_domain *domain,
        struct tegra_iovmm_area *iovma, unsigned long addr,
        unsigned long pfn)
{
        struct smmu_as *as = container_of(domain, struct smmu_as, domain);
        struct smmu_device *smmu = as->smmu;
        unsigned long *pte;
        unsigned int *pte_counter;
        struct page *ptpage;

        pr_debug("%s:%d iova=%lx pfn=%lx asid=%d\n", __func__, __LINE__,
                 addr, pfn, as - as->smmu->as);

        BUG_ON(!pfn_valid(pfn));
        mutex_lock(&as->lock);
        /* locate_pte() leaves ptpage kmap()ed on success */
        pte = locate_pte(as, addr, true, &ptpage, &pte_counter);
        if (pte) {
                /* Maintain the count of valid PTEs in this page table */
                if (*pte == _PTE_VACANT(addr))
                        (*pte_counter)++;
                *pte = SMMU_PFN_TO_PTE(pfn, as->pte_attr);
                if (unlikely((*pte == _PTE_VACANT(addr))))
                        (*pte_counter)--;
                flush_cpu_dcache(pte, ptpage, sizeof *pte);
                flush_ptc_and_tlb(smmu, as, addr, pte, ptpage, 0);
                kunmap(ptpage);
                put_signature(as, addr, pfn);
        }
        mutex_unlock(&as->lock);
}
1033
1034 /*
1035  * Caller must lock/unlock as
1036  */
1037 static int alloc_pdir(struct smmu_as *as)
1038 {
1039         unsigned long *pdir;
1040         int pdn;
1041
1042         if (as->pdir_page)
1043                 return 0;
1044
1045         as->pte_count = kzalloc(sizeof(as->pte_count[0]) * SMMU_PDIR_COUNT,
1046                                 GFP_KERNEL);
1047         if (!as->pte_count) {
1048                 pr_err(DRIVER_NAME
1049                 ": failed to allocate tegra_iovmm_device PTE cunters\n");
1050                 return -ENOMEM;
1051         }
1052         as->pdir_page = alloc_page(GFP_KERNEL | __GFP_DMA);
1053         if (!as->pdir_page) {
1054                 pr_err(DRIVER_NAME
1055                 ": failed to allocate tegra_iovmm_device page directory\n");
1056                 kfree(as->pte_count);
1057                 as->pte_count = NULL;
1058                 return -ENOMEM;
1059         }
1060         SetPageReserved(as->pdir_page);
1061         pdir = kmap(as->pdir_page);
1062
1063         for (pdn = 0; pdn < SMMU_PDIR_COUNT; pdn++)
1064                 pdir[pdn] = _PDE_VACANT(pdn);
1065         flush_cpu_dcache(pdir, as->pdir_page, SMMU_PDIR_SIZE);
1066         writel(MC_SMMU_PTC_FLUSH_0_PTC_FLUSH_TYPE_ADR |
1067                 VA_PAGE_TO_PA(pdir, as->pdir_page),
1068                 as->smmu->regs_mc + MC_SMMU_PTC_FLUSH_0);
1069         flush_smmu_regs(as->smmu);
1070         writel(MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_MATCH_ALL |
1071                 MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH__ENABLE |
1072                 (as->asid << MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_SHIFT),
1073                 as->smmu->regs_mc + MC_SMMU_TLB_FLUSH_0);
1074         flush_smmu_regs(as->smmu);
1075         kunmap(as->pdir_page);
1076
1077         return 0;
1078 }
1079
1080 static void _sysfs_create(struct smmu_as *as, struct device *sysfs_parent);
1081
1082 /*
1083  * Allocate resources for an AS
1084  *      TODO: split into "alloc" and "lock"
1085  */
1086 static struct tegra_iovmm_domain *smmu_alloc_domain(
1087         struct tegra_iovmm_device *dev, struct tegra_iovmm_client *client)
1088 {
1089         struct smmu_device *smmu =
1090                 container_of(dev, struct smmu_device, iovmm_dev);
1091         struct smmu_as *as = NULL;
1092         const struct domain_hwc_map *map = NULL;
1093         int asid, i;
1094
1095         /* Look for a free AS */
1096         for  (asid = smmu->lowest_asid; asid < smmu->num_ases; asid++) {
1097                 mutex_lock(&smmu->as[asid].lock);
1098                 if (!smmu->as[asid].hwclients) {
1099                         as = &smmu->as[asid];
1100                         break;
1101                 }
1102                 mutex_unlock(&smmu->as[asid].lock);
1103         }
1104
1105         if (!as) {
1106                 pr_err(DRIVER_NAME ": no free AS\n");
1107                 return NULL;
1108         }
1109
1110         if (alloc_pdir(as) < 0)
1111                 goto bad3;
1112
1113         /* Look for a matching hardware client group */
1114         for (i = 0; i < ARRAY_SIZE(smmu_hwc_map); i++) {
1115                 if (!strcmp(smmu_hwc_map[i].dev_name, client->misc_dev->name)) {
1116                         map = &smmu_hwc_map[i];
1117                         break;
1118                 }
1119         }
1120
1121         if (!map) {
1122                 pr_err(DRIVER_NAME ": no SMMU resource for %s (%s)\n",
1123                         client->name, client->misc_dev->name);
1124                 goto bad2;
1125         }
1126
1127         spin_lock(&smmu->lock);
1128         /* Update PDIR register */
1129         writel(MC_SMMU_PTB_ASID_0_CURRENT_ASID(as->asid),
1130                 as->smmu->regs_mc + MC_SMMU_PTB_ASID_0);
1131         writel(SMMU_MK_PDIR(as->pdir_page, as->pdir_attr),
1132                 as->smmu->regs_mc + MC_SMMU_PTB_DATA_0);
1133         flush_smmu_regs(smmu);
1134
1135         /* Put each hardware client in the group into the address space */
1136         for (i = 0; i < map->nr_hwcs; i++) {
1137                 struct smmu_hwc_state *hwcst = &smmu->hwc_state[map->hwcs[i]];
1138
1139                 /* Is the hardware client busy? */
1140                 if (hwcst->enable_disable != SMMU_ASID_DISABLE &&
1141                         hwcst->enable_disable != SMMU_ASID_ENABLE(as->asid)) {
1142                         pr_err(DRIVER_NAME
1143                                 ": HW 0x%lx busy for ASID %ld (client!=%s)\n",
1144                                 hwcst->reg,
1145                                 SMMU_ASID_ASID(hwcst->enable_disable),
1146                                 client->name);
1147                         goto bad;
1148                 }
1149                 hwcst->enable_disable = SMMU_ASID_ENABLE(as->asid);
1150                 writel(hwcst->enable_disable, smmu->regs_mc + hwcst->reg);
1151         }
1152         flush_smmu_regs(smmu);
1153         spin_unlock(&smmu->lock);
1154         as->hwclients = map;
1155         _sysfs_create(as, client->misc_dev->this_device);
1156         mutex_unlock(&as->lock);
1157
1158 #ifdef CONFIG_ARCH_TEGRA_3x_SOC
1159         /* Reserve "page zero" for AVP vectors using a common dummy page */
1160         smmu_map_pfn(&as->domain, NULL, 0,
1161                 page_to_phys(as->smmu->avp_vector_page) >> SMMU_PAGE_SHIFT);
1162 #endif
1163         return &as->domain;
1164
1165 bad:
1166         /* Reset hardware clients that have been enabled */
1167         while (--i >= 0) {
1168                 struct smmu_hwc_state *hwcst = &smmu->hwc_state[map->hwcs[i]];
1169
1170                 hwcst->enable_disable = SMMU_ASID_DISABLE;
1171                 writel(hwcst->enable_disable, smmu->regs_mc + hwcst->reg);
1172         }
1173         flush_smmu_regs(smmu);
1174         spin_unlock(&as->smmu->lock);
1175 bad2:
1176         free_pdir(as);
1177 bad3:
1178         mutex_unlock(&as->lock);
1179         return NULL;
1180
1181 }
1182
1183 /*
1184  * Release resources for an AS
1185  *      TODO: split into "unlock" and "free"
1186  */
1187 static void smmu_free_domain(
1188         struct tegra_iovmm_domain *domain, struct tegra_iovmm_client *client)
1189 {
1190         struct smmu_as *as = container_of(domain, struct smmu_as, domain);
1191         struct smmu_device *smmu = as->smmu;
1192         const struct domain_hwc_map *map = NULL;
1193         int i;
1194
1195         mutex_lock(&as->lock);
1196         map = as->hwclients;
1197
1198         spin_lock(&smmu->lock);
1199         for (i = 0; i < map->nr_hwcs; i++) {
1200                 struct smmu_hwc_state *hwcst = &smmu->hwc_state[map->hwcs[i]];
1201
1202                 hwcst->enable_disable = SMMU_ASID_DISABLE;
1203                 writel(SMMU_ASID_DISABLE, smmu->regs_mc + hwcst->reg);
1204         }
1205         flush_smmu_regs(smmu);
1206         spin_unlock(&smmu->lock);
1207
1208         as->hwclients = NULL;
1209         if (as->pdir_page) {
1210                 spin_lock(&smmu->lock);
1211                 writel(MC_SMMU_PTB_ASID_0_CURRENT_ASID(as->asid),
1212                         smmu->regs_mc + MC_SMMU_PTB_ASID_0);
1213                 writel(MC_SMMU_PTB_DATA_0_RESET_VAL,
1214                         smmu->regs_mc + MC_SMMU_PTB_DATA_0);
1215                 flush_smmu_regs(smmu);
1216                 spin_unlock(&smmu->lock);
1217
1218                 free_pdir(as);
1219         }
1220         mutex_unlock(&as->lock);
1221 }
1222
/* IOVMM backend callbacks exported to the tegra_iovmm core */
static struct tegra_iovmm_device_ops tegra_iovmm_smmu_ops = {
        .map = smmu_map,
        .unmap = smmu_unmap,
        .map_pfn = smmu_map_pfn,
        .alloc_domain = smmu_alloc_domain,
        .free_domain = smmu_free_domain,
        .suspend = smmu_suspend,
        .resume = smmu_resume,
};
1232
1233 static int smmu_probe(struct platform_device *pdev)
1234 {
1235         struct smmu_device *smmu = NULL;
1236         struct resource *window = NULL;
1237         int e, i, asid;
1238
1239         BUILD_BUG_ON(PAGE_SHIFT != SMMU_PAGE_SHIFT);
1240         BUILD_BUG_ON(ARRAY_SIZE(smmu_hwc_state_init) != HWC_COUNT);
1241
1242         window = tegra_smmu_window(0);
1243         if (!window) {
1244                 pr_err(DRIVER_NAME ": No SMMU resources\n");
1245                 return -ENODEV;
1246         }
1247
1248         smmu = kzalloc(sizeof(*smmu), GFP_KERNEL);
1249         if (!smmu) {
1250                 pr_err(DRIVER_NAME ": failed to allocate smmu_device\n");
1251                 return -ENOMEM;
1252         }
1253
1254         smmu->num_ases = MC_SMMU_NUM_ASIDS;
1255         smmu->iovmm_base = (tegra_iovmm_addr_t)window->start;
1256         smmu->page_count = (window->end + 1 - window->start) >> SMMU_PAGE_SHIFT;
1257         for (i = _MC; i < _REGS; i++) {
1258                 if (tegra_reg[i].base != 0)
1259                         smmu->regs[i] = ioremap(tegra_reg[i].base,
1260                                 tegra_reg[i].size);
1261         }
1262
1263         smmu->translation_enable_0_0 = ~0;
1264         smmu->translation_enable_1_0 = ~0;
1265         smmu->translation_enable_2_0 = ~0;
1266         smmu->asid_security_0        = 0;
1267
1268         memcpy(smmu->hwc_state, smmu_hwc_state_init, sizeof(smmu->hwc_state));
1269
1270         smmu->iovmm_dev.name = VMM_NAME;
1271         smmu->iovmm_dev.ops = &tegra_iovmm_smmu_ops;
1272         smmu->iovmm_dev.pgsize_bits = SMMU_PAGE_SHIFT;
1273
1274         e = tegra_iovmm_register(&smmu->iovmm_dev);
1275         if (e)
1276                 goto fail;
1277
1278         smmu->as = kzalloc(sizeof(smmu->as[0]) * smmu->num_ases, GFP_KERNEL);
1279         if (!smmu->as) {
1280                 pr_err(DRIVER_NAME ": failed to allocate smmu_as\n");
1281                 e = -ENOMEM;
1282                 goto fail;
1283         }
1284
1285         /* Initialize address space structure array */
1286         for (asid = 0; asid < smmu->num_ases; asid++) {
1287                 struct smmu_as *as = &smmu->as[asid];
1288
1289                 as->smmu = smmu;
1290                 as->asid = asid;
1291                 as->pdir_attr = _PDIR_ATTR;
1292                 as->pde_attr  = _PDE_ATTR;
1293                 as->pte_attr  = _PTE_ATTR;
1294
1295                 mutex_init(&as->lock);
1296
1297                 e = tegra_iovmm_domain_init(&as->domain, &smmu->iovmm_dev,
1298                         smmu->iovmm_base,
1299                         smmu->iovmm_base +
1300                                 (smmu->page_count << SMMU_PAGE_SHIFT));
1301                 if (e)
1302                         goto fail;
1303         }
1304         spin_lock_init(&smmu->lock);
1305         smmu_setup_regs(smmu);
1306         smmu->enable = 1;
1307         smmu->dev = &pdev->dev;
1308         platform_set_drvdata(pdev, smmu);
1309
1310 #ifdef CONFIG_ARCH_TEGRA_3x_SOC
1311         smmu->avp_vector_page = alloc_page(GFP_KERNEL);
1312         if (!smmu->avp_vector_page)
1313                 goto fail;
1314 #endif
1315         smmu_debugfs_create(smmu);
1316
1317         return 0;
1318
1319 fail:
1320 #ifdef CONFIG_ARCH_TEGRA_3x_SOC
1321         if (smmu->avp_vector_page)
1322                 __free_page(smmu->avp_vector_page);
1323 #endif
1324         if (smmu && smmu->as) {
1325                 for (asid = 0; asid < smmu->num_ases; asid++) {
1326                         if (smmu->as[asid].pdir_page) {
1327                                 ClearPageReserved(smmu->as[asid].pdir_page);
1328                                 __free_page(smmu->as[asid].pdir_page);
1329                         }
1330                 }
1331                 kfree(smmu->as);
1332         }
1333         for (i = 0; i < _REGS; i++) {
1334                 if (smmu->regs[i]) {
1335                         iounmap(smmu->regs[i]);
1336                         smmu->regs[i] = NULL;
1337                 }
1338         }
1339         kfree(smmu);
1340         return e;
1341 }
1342
/* Platform driver glue: matched by name against the SMMU device */
static struct platform_driver tegra_iovmm_smmu_drv = {
        .probe = smmu_probe,
        .remove = smmu_remove,
        .driver = {
                .name = DRIVER_NAME,
        },
};
1350
1351 static int __devinit smmu_init(void)
1352 {
1353         return platform_driver_register(&tegra_iovmm_smmu_drv);
1354 }
1355
/* Module unload: unregister the platform driver */
static void __exit smmu_exit(void)
{
        platform_driver_unregister(&tegra_iovmm_smmu_drv);
}
1360
/* IOVMM must be up before client drivers probe; hence subsys_initcall */
subsys_initcall(smmu_init);
module_exit(smmu_exit);
1363
1364 /*
1365  * SMMU-global sysfs interface for debugging
1366  */
1367 static ssize_t _sysfs_show_reg(struct device *d,
1368                                 struct device_attribute *da, char *buf);
1369 static ssize_t _sysfs_store_reg(struct device *d,
1370                                 struct device_attribute *da, const char *buf,
1371                                 size_t count);
1372
1373 #define _NAME_MAP_SUFFIX(_name, base, suffix)   {       \
1374         .name = __stringify(_name) suffix,      \
1375         .offset = _name##_0,            \
1376         .regbase = (base),              \
1377         .dev_attr = __ATTR(_name, S_IRUGO | S_IWUSR,    \
1378                         _sysfs_show_reg, _sysfs_store_reg)      \
1379 }
1380 #define _NAME_MAP(_name, base)  _NAME_MAP_SUFFIX(_name, base, "")
1381
/*
 * Table of registers exposed through sysfs; lookup_reg() resolves a
 * sysfs attribute back to its entry by attribute name.
 */
static
struct _reg_name_map {
        const char *name;       /* attribute / register name */
        size_t  offset;         /* byte offset within the aperture */
        unsigned regbase;       /* index into smmu->regs[] / tegra_reg[] */
        struct device_attribute dev_attr;
} _smmu_reg_name_map[] = {
        _NAME_MAP(MC_INTSTATUS, _MC),
        _NAME_MAP(MC_ERR_STATUS, _MC),
        _NAME_MAP(MC_ERR_ADR, _MC),

        _NAME_MAP(MC_SMMU_CONFIG, _MC),
        _NAME_MAP(MC_SMMU_TLB_CONFIG, _MC),
        _NAME_MAP(MC_SMMU_PTC_CONFIG, _MC),
        _NAME_MAP(MC_SMMU_PTB_ASID, _MC),
        _NAME_MAP(MC_SMMU_PTB_DATA, _MC),
        _NAME_MAP(MC_SMMU_TLB_FLUSH, _MC),
        _NAME_MAP(MC_SMMU_PTC_FLUSH, _MC),
        _NAME_MAP(MC_SMMU_ASID_SECURITY, _MC),
        _NAME_MAP(MC_EMEM_CFG, _MC),
        _NAME_MAP(MC_SECURITY_CFG0, _MC),
        _NAME_MAP(MC_SECURITY_CFG1, _MC),
        _NAME_MAP(MC_SECURITY_CFG2, _MC),
        _NAME_MAP(MC_SECURITY_RSV, _MC),
        _NAME_MAP(MC_SMMU_STATS_TLB_HIT_COUNT, _MC),
        _NAME_MAP(MC_SMMU_STATS_TLB_MISS_COUNT, _MC),
        _NAME_MAP(MC_SMMU_STATS_PTC_HIT_COUNT, _MC),
        _NAME_MAP(MC_SMMU_STATS_PTC_MISS_COUNT, _MC),
#ifdef TEGRA_MC0_BASE
        /* Per-instance stats counters on SoCs with split MCs */
        _NAME_MAP_SUFFIX(MC_SMMU_STATS_TLB_HIT_COUNT, _MC0, ".0"),
        _NAME_MAP_SUFFIX(MC_SMMU_STATS_TLB_MISS_COUNT, _MC0, ".0"),
        _NAME_MAP_SUFFIX(MC_SMMU_STATS_PTC_HIT_COUNT, _MC0, ".0"),
        _NAME_MAP_SUFFIX(MC_SMMU_STATS_PTC_MISS_COUNT, _MC0, ".0"),
#endif
#ifdef TEGRA_MC1_BASE
        _NAME_MAP_SUFFIX(MC_SMMU_STATS_TLB_HIT_COUNT, _MC1, ".1"),
        _NAME_MAP_SUFFIX(MC_SMMU_STATS_TLB_MISS_COUNT, _MC1, ".1"),
        _NAME_MAP_SUFFIX(MC_SMMU_STATS_PTC_HIT_COUNT, _MC1, ".1"),
        _NAME_MAP_SUFFIX(MC_SMMU_STATS_PTC_MISS_COUNT, _MC1, ".1"),
#endif
        _NAME_MAP(MC_SMMU_TRANSLATION_ENABLE_0, _MC),
        _NAME_MAP(MC_SMMU_TRANSLATION_ENABLE_1, _MC),
        _NAME_MAP(MC_SMMU_TRANSLATION_ENABLE_2, _MC),

        _NAME_MAP(MC_STAT_CONTROL, _MC),
        _NAME_MAP(MC_STAT_EMC_CLOCKS, _MC),
        _NAME_MAP(MC_STAT_EMC_FILTER_SET0_ADR_LIMIT_LO, _MC),
        _NAME_MAP(MC_STAT_EMC_FILTER_SET0_ADR_LIMIT_HI, _MC),
        _NAME_MAP(MC_STAT_EMC_FILTER_SET0_CLIENT_0, _MC),
        _NAME_MAP(MC_STAT_EMC_FILTER_SET0_CLIENT_1, _MC),
        _NAME_MAP(MC_STAT_EMC_FILTER_SET0_CLIENT_2, _MC),
        _NAME_MAP(MC_STAT_EMC_SET0_COUNT, _MC),
        _NAME_MAP(MC_STAT_EMC_SET0_COUNT_MSBS, _MC),
        _NAME_MAP(MC_STAT_EMC_FILTER_SET1_ADR_LIMIT_LO, _MC),
        _NAME_MAP(MC_STAT_EMC_FILTER_SET1_ADR_LIMIT_HI, _MC),
        _NAME_MAP(MC_STAT_EMC_FILTER_SET1_CLIENT_0, _MC),
        _NAME_MAP(MC_STAT_EMC_FILTER_SET1_CLIENT_1, _MC),
        _NAME_MAP(MC_STAT_EMC_FILTER_SET1_CLIENT_2, _MC),
        _NAME_MAP(MC_STAT_EMC_SET1_COUNT, _MC),
        _NAME_MAP(MC_STAT_EMC_SET1_COUNT_MSBS, _MC),
        _NAME_MAP(MC_STAT_EMC_FILTER_SET0_VIRTUAL_ADR_LIMIT_LO, _MC),
        _NAME_MAP(MC_STAT_EMC_FILTER_SET0_VIRTUAL_ADR_LIMIT_HI, _MC),
        _NAME_MAP(MC_STAT_EMC_FILTER_SET0_ASID, _MC),
        _NAME_MAP(MC_STAT_EMC_FILTER_SET1_VIRTUAL_ADR_LIMIT_LO, _MC),
        _NAME_MAP(MC_STAT_EMC_FILTER_SET1_VIRTUAL_ADR_LIMIT_HI, _MC),
        _NAME_MAP(MC_STAT_EMC_FILTER_SET1_ASID, _MC),
/* One <client>_ASID register entry per hardware client in SMMU_HWC */
#define op(c)   _NAME_MAP(MC_SMMU_##c##_ASID, _MC),
        SMMU_HWC
#undef op
        _NAME_MAP(AHB_ARBITRATION_XBAR_CTRL, _AHBARB),
#ifdef AHB_MASTER_SWID_0
        _NAME_MAP(AHB_MASTER_SWID, _AHBARB),
#endif
#ifdef APBDMA_CHANNEL_SWID_0
        _NAME_MAP(APBDMA_CHANNEL_SWID, _APBDMA),
#endif
};
1459
1460 static struct _reg_name_map *lookup_reg(struct device_attribute *da)
1461 {
1462         int i;
1463         for (i = 0; i < ARRAY_SIZE(_smmu_reg_name_map); i++) {
1464                 if (!strcmp(_smmu_reg_name_map[i].name, da->attr.name))
1465                         return &_smmu_reg_name_map[i];
1466         }
1467         return NULL;
1468 }
1469
/*
 * sysfs "show": print the register's current value and its physical
 * address ("<value> @<address>").
 */
static ssize_t _sysfs_show_reg(struct device *d,
                                        struct device_attribute *da, char *buf)
{
        struct smmu_device *smmu =
                container_of(d, struct smmu_device, sysfs_dev);
        struct _reg_name_map *reg = lookup_reg(da);

        if (!reg)
                return -ENODEV;
        return sprintf(buf, "%08lx @%08lx\n",
                (unsigned long)readl(smmu->regs[reg->regbase] + reg->offset),
                tegra_reg[reg->regbase].base + reg->offset);
}
1483
#ifdef CONFIG_TEGRA_IOVMM_SMMU_SYSFS
/* Unrestricted sysfs build: the comma expression clears any pending
 * challenge PID and always evaluates to 1 (write allowed) */
#define good_challenge(smmu)    ((smmu->challenge_pid = 0), 1)
#else
/* Restricted build: a write is allowed only from the task that
 * posted the challenge PID; the challenge is consumed either way */
static inline int good_challenge(struct smmu_device *smmu)
{
        int ok = (smmu->challenge_pid == current->pid);
        smmu->challenge_pid = 0;
        return ok;
}
#endif
1494
/*
 * sysfs "store": write a hex value to the named register.
 *
 * An unrestricted write is allowed only when the challenge test
 * passes; otherwise only the statistics enable/test bits of the
 * TLB/PTC config registers may be changed (read-modify-write under
 * a mask).  Always "succeeds" from the caller's point of view
 * (returns count even on parse failure).
 */
static ssize_t _sysfs_store_reg(struct device *d,
                        struct device_attribute *da,
                        const char *buf, size_t count)
{
        struct smmu_device *smmu =
                container_of(d, struct smmu_device, sysfs_dev);
        struct _reg_name_map *reg = lookup_reg(da);
        unsigned long value;

        if (!reg)
                return -ENODEV;
        if (kstrtoul(buf, 16, &value))
                return count;
        if (good_challenge(smmu))
                writel(value, smmu->regs[reg->regbase] + reg->offset);
        else if (reg->regbase == _MC) {
                /* Restricted path: only the stats bits are writable */
                unsigned long mask = 0;
                switch (reg->offset) {
                case MC_SMMU_TLB_CONFIG_0:
                        mask = MC_SMMU_TLB_CONFIG_0_TLB_STATS_ENABLE__MASK |
                                MC_SMMU_TLB_CONFIG_0_TLB_STATS_TEST__MASK;
                        break;
                case MC_SMMU_PTC_CONFIG_0:
                        mask = MC_SMMU_PTC_CONFIG_0_PTC_STATS_ENABLE__MASK |
                                MC_SMMU_PTC_CONFIG_0_PTC_STATS_TEST__MASK;
                        break;
                default:
                        break;
                }

                if (mask) {
                        /* Merge the masked bits of the new value into the
                         * register's current contents */
                        unsigned long currval =
                                (unsigned long)readl(smmu->regs[reg->regbase] +
                                                reg->offset);
                        currval &= ~mask;
                        value &= mask;
                        value |= currval;
                        writel(value, smmu->regs[reg->regbase] + reg->offset);
                }
        }
        return count;
}
1537
1538 static ssize_t _sysfs_show_smmu(struct device *d,
1539                                 struct device_attribute *da, char *buf)
1540 {
1541         struct smmu_device *smmu =
1542                 container_of(d, struct smmu_device, sysfs_dev);
1543         ssize_t rv = 0;
1544         int asid;
1545
1546         rv += sprintf(buf + rv , "    regs_mc: %p @%8lx\n",
1547                                 smmu->regs_mc, tegra_reg[_MC].base);
1548 #ifdef TEGRA_MC0_BASE
1549         rv += sprintf(buf + rv , "   regs_mc0: %p @%8lx\n",
1550                                 smmu->regs_mc0, tegra_reg[_MC0].base);
1551 #endif
1552 #ifdef TEGRA_MC1_BASE
1553         rv += sprintf(buf + rv , "   regs_mc1: %p @%8lx\n",
1554                                 smmu->regs_mc1, tegra_reg[_MC1].base);
1555 #endif
1556         rv += sprintf(buf + rv , "regs_ahbarb: %p @%8lx\n",
1557                                 smmu->regs_ahbarb, tegra_reg[_AHBARB].base);
1558         rv += sprintf(buf + rv , "regs_apbdma: %p @%8lx\n",
1559                                 smmu->regs_apbdma, tegra_reg[_APBDMA].base);
1560         rv += sprintf(buf + rv , " iovmm_base: %p\n", (void *)smmu->iovmm_base);
1561         rv += sprintf(buf + rv , " page_count: %8lx\n", smmu->page_count);
1562         rv += sprintf(buf + rv , "   num_ases: %d\n", smmu->num_ases);
1563         rv += sprintf(buf + rv , "         as: %p\n", smmu->as);
1564         for (asid = 0; asid < smmu->num_ases; asid++) {
1565                 rv +=
1566               sprintf(buf + rv , " ----- asid: %d\n", smmu->as[asid].asid);
1567                 rv +=
1568               sprintf(buf + rv , "  pdir_page: %p", smmu->as[asid].pdir_page);
1569                 if (smmu->as[asid].pdir_page)
1570                         rv +=
1571               sprintf(buf + rv , " @%8lx\n",
1572                         (unsigned long)page_to_phys(smmu->as[asid].pdir_page));
1573                         else
1574                         rv += sprintf(buf + rv , "\n");
1575         }
1576         rv += sprintf(buf + rv , "     enable: %s\n",
1577                         smmu->enable ? "yes" : "no");
1578         return rv;
1579 }
1580
/* Read-only attribute backed by _sysfs_show_smmu() above */
static struct device_attribute _attr_show_smmu
                 = __ATTR(show_smmu, S_IRUGO, _sysfs_show_smmu, NULL);
1583
/*
 * Generate a sysfs "show" handler named _sysfs_show_<name> that prints
 * smmu-><field> with the printf format <fmt>.
 */
#define _SYSFS_SHOW_VALUE(name, field, fmt)		\
static ssize_t _sysfs_show_##name(struct device *d,	\
	struct device_attribute *da, char *buf)		\
{							\
	struct smmu_device *smmu =			\
		container_of(d, struct smmu_device, sysfs_dev); \
	return sprintf(buf, fmt "\n", smmu->field);	\
}
1592
/* NULL "set" callback for entries that need no post-write action. */
static void (*_sysfs_null_callback)(struct smmu_device *, unsigned long *) =
	NULL;
1595
/*
 * Generate a sysfs "store" handler named _sysfs_set_<name>.  The written
 * string is parsed with <base>; the value is stored into smmu-><field>
 * only when <challenge> holds and the value is below <ceil>, after which
 * <callback> (if non-NULL) may post-process the field.  The challenge
 * authorization is consumed (challenge_pid reset) on every write attempt.
 */
#define _SYSFS_SET_VALUE_DO(name, field, base, ceil, callback, challenge) \
static ssize_t _sysfs_set_##name(struct device *d,		\
		struct device_attribute *da, const char *buf, size_t count) \
{								\
	unsigned long value;					\
	struct smmu_device *smmu =				\
		container_of(d, struct smmu_device, sysfs_dev);	\
	if (kstrtoul(buf, base, &value))			\
		return count;					\
	/* value is unsigned, so only the upper bound needs checking */ \
	if (challenge && value < ceil) {			\
		smmu->field = value;				\
		if (callback)					\
			callback(smmu, &smmu->field);		\
	}							\
	smmu->challenge_pid = 0;				\
	return count;						\
}
/*
 * With CONFIG_TEGRA_IOVMM_SMMU_SYSFS the entries are freely writable
 * (the challenge term is the constant 1); otherwise a writer must first
 * complete the challenge_code handshake so that challenge_pid matches
 * the current task's pid.
 */
#ifdef CONFIG_TEGRA_IOVMM_SMMU_SYSFS
#define _SYSFS_SET_VALUE(name, field, base, ceil, callback)	\
	_SYSFS_SET_VALUE_DO(name, field, base, ceil, callback, 1)
#else
#define _SYSFS_SET_VALUE(name, field, base, ceil, callback)	\
	_SYSFS_SET_VALUE_DO(name, field, base, ceil, callback,	\
		(smmu->challenge_pid == current->pid))
#endif
1621
/* lowest_asid / debug_asid: decimal, valid range [0, MC_SMMU_NUM_ASIDS) */
_SYSFS_SHOW_VALUE(lowest_asid, lowest_asid, "%lu")
_SYSFS_SET_VALUE(lowest_asid, lowest_asid, 10,
		MC_SMMU_NUM_ASIDS, _sysfs_null_callback)
_SYSFS_SHOW_VALUE(debug_asid, debug_asid, "%lu")
_SYSFS_SET_VALUE(debug_asid, debug_asid, 10,
		MC_SMMU_NUM_ASIDS, _sysfs_null_callback)
/*
 * signature_pid bypasses the challenge handshake (challenge argument
 * fixed to 1); accepts decimal values up to PID_MAX_LIMIT.
 */
_SYSFS_SHOW_VALUE(signature_pid, signature_pid, "%lu")
_SYSFS_SET_VALUE_DO(signature_pid, signature_pid, 10, PID_MAX_LIMIT+1,
		_sysfs_null_callback, 1)
1631
/*
 * Protection of sysfs entries against accidental writes:
 *   Reading /sys/devices/smmu/challenge_code returns a random number.
 *   A process writes (its pid ^ challenge_code) back to
 *   /sys/devices/smmu/challenge_code; it is then allowed to alter a
 *   protected entry.  The challenge code is reset after each read,
 *   write, and protected-entry access.
 */
1639 static ssize_t _sysfs_show_challenge_code(struct device *d,
1640         struct device_attribute *da, char *buf)
1641 {
1642         struct smmu_device *smmu =
1643                 container_of(d, struct smmu_device, sysfs_dev);
1644         smmu->challenge_pid = 0;
1645         smmu->challenge_code = random32();
1646         return sprintf(buf, "%lx\n", smmu->challenge_code);
1647 }
1648
1649 static ssize_t _sysfs_set_challenge_code(struct device *d,
1650                 struct device_attribute *da, const char *buf, size_t count)
1651 {
1652         struct smmu_device *smmu =
1653                 container_of(d, struct smmu_device, sysfs_dev);
1654         unsigned long value;
1655         if (!kstrtoul(buf, 16, &value)) {
1656                 smmu->challenge_pid = smmu->challenge_code ^ value;
1657                 smmu->challenge_code = random32();
1658         }
1659         return count;
1660 }
1661
1662 /*
1663  * "echo 's d' > /sys/devices/smmu/copy_pdir" copies ASID s's pdir pointer
1664  * to ASID d. -1 as s resets d's pdir to null.
1665  */
1666 static ssize_t _sysfs_copy_pdir(struct device *d,
1667                 struct device_attribute *da, const char *buf, size_t count)
1668 {
1669         struct smmu_device *smmu =
1670                 container_of(d, struct smmu_device, sysfs_dev);
1671         long fr, to;
1672
1673         if (kstrtol(buf, 16, &fr))
1674                 return count;
1675         while (isxdigit(*buf))
1676                 buf++;
1677         while (isspace(*buf))
1678                 buf++;
1679         if (kstrtol(buf, 16, &to))
1680                 return count;
1681
1682         if (good_challenge(smmu) && fr != to &&
1683                 fr < smmu->num_ases && 0 <= to && to < smmu->num_ases) {
1684                 spin_lock(&smmu->lock);
1685                 writel(MC_SMMU_PTB_ASID_0_CURRENT_ASID(to),
1686                         smmu->regs_mc + MC_SMMU_PTB_ASID_0);
1687                 writel((fr >= 0)
1688                 ? SMMU_MK_PDIR(smmu->as[fr].pdir_page, smmu->as[fr].pdir_attr)
1689                 : MC_SMMU_PTB_DATA_0_RESET_VAL,
1690                         smmu->regs_mc + MC_SMMU_PTB_DATA_0);
1691                 smmu->as[to].pdir_page = (fr >= 0) ? smmu->as[fr].pdir_page : 0;
1692                 spin_unlock(&smmu->lock);
1693         }
1694         return count;
1695 }
1696
1697 static void _sysfs_mask_attr(struct smmu_device *smmu, unsigned long *field)
1698 {
1699         *field &= _MASK_ATTR;
1700 }
1701
/*
 * Mask *field to the legal attribute bits, then rewrite the page
 * directory word of debug_asid with the new attribute bits merged in,
 * so the hardware picks up the change immediately.
 */
static void _sysfs_mask_pdir_attr(struct smmu_device *smmu,
	unsigned long *field)
{
	unsigned long pdir;

	_sysfs_mask_attr(smmu, field);
	spin_lock(&smmu->lock);
	/* Select which ASID the PTB_DATA register accesses refer to. */
	writel(MC_SMMU_PTB_ASID_0_CURRENT_ASID(smmu->debug_asid),
		smmu->regs_mc + MC_SMMU_PTB_ASID_0);
	pdir = readl(smmu->regs_mc + MC_SMMU_PTB_DATA_0);
	pdir &= ~_MASK_ATTR;	/* drop the old attribute bits */
	pdir |= *field;		/* merge in the new, already-masked bits */
	writel(pdir, smmu->regs_mc + MC_SMMU_PTB_DATA_0);
	spin_unlock(&smmu->lock);
	flush_smmu_regs(smmu);
}
1718
/* Function-pointer wrappers so the _SYSFS_SET_VALUE macros can test
 * "if (callback)" uniformly against NULL and non-NULL callbacks. */
static void (*_sysfs_mask_attr_callback)(struct smmu_device *,
				unsigned long *field) = &_sysfs_mask_attr;
static void (*_sysfs_mask_pdir_attr_callback)(struct smmu_device *,
				unsigned long *field) = &_sysfs_mask_pdir_attr;
1723
/*
 * pdir/pde/pte attribute entries for the ASID selected via debug_asid;
 * hex values, clamped by the mask callbacks.  Writing pdir_attr also
 * pushes the new bits to the hardware (see _sysfs_mask_pdir_attr).
 */
_SYSFS_SHOW_VALUE(pdir_attr, as[smmu->debug_asid].pdir_attr, "%lx")
_SYSFS_SET_VALUE(pdir_attr, as[smmu->debug_asid].pdir_attr, 16,
		_PDIR_ATTR + 1, _sysfs_mask_pdir_attr_callback)
_SYSFS_SHOW_VALUE(pde_attr, as[smmu->debug_asid].pde_attr, "%lx")
_SYSFS_SET_VALUE(pde_attr, as[smmu->debug_asid].pde_attr, 16,
		_PDE_ATTR + 1, _sysfs_mask_attr_callback)
_SYSFS_SHOW_VALUE(pte_attr, as[smmu->debug_asid].pte_attr, "%lx")
_SYSFS_SET_VALUE(pte_attr, as[smmu->debug_asid].pte_attr, 16,
		_PTE_ATTR + 1, _sysfs_mask_attr_callback)
1733
/* Read/write sysfs entries generated by the macros above. */
static struct device_attribute _attr_values[] = {
	__ATTR(lowest_asid, S_IRUGO | S_IWUSR,
		_sysfs_show_lowest_asid, _sysfs_set_lowest_asid),
	__ATTR(debug_asid, S_IRUGO | S_IWUSR,
		_sysfs_show_debug_asid, _sysfs_set_debug_asid),
	__ATTR(signature_pid, S_IRUGO | S_IWUSR,
		_sysfs_show_signature_pid, _sysfs_set_signature_pid),
	__ATTR(challenge_code, S_IRUGO | S_IWUSR,
		_sysfs_show_challenge_code, _sysfs_set_challenge_code),
	__ATTR(copy_pdir, S_IWUSR, NULL, _sysfs_copy_pdir),

	__ATTR(pdir_attr, S_IRUGO | S_IWUSR,
		_sysfs_show_pdir_attr, _sysfs_set_pdir_attr),
	__ATTR(pde_attr, S_IRUGO | S_IWUSR,
		_sysfs_show_pde_attr, _sysfs_set_pde_attr),
	__ATTR(pte_attr, S_IRUGO | S_IWUSR,
		_sysfs_show_pte_attr, _sysfs_set_pte_attr),
};
1752
/*
 * Flat attribute list handed to sysfs_create_group(); populated at
 * runtime by _sysfs_smmu().  "+ 3" covers the show_smmu entry, the
 * NULL terminator, and one spare slot.
 */
static struct attribute *_smmu_attrs[
	ARRAY_SIZE(_smmu_reg_name_map) + ARRAY_SIZE(_attr_values) + 3];
static struct attribute_group _smmu_attr_group = {
	.attrs = _smmu_attrs
};
1758
1759 static void _sysfs_smmu(struct smmu_device *smmu, struct device *parent)
1760 {
1761         int i, j;
1762
1763         if (smmu->sysfs_use_count++ > 0)
1764                 return;
1765         for (i = 0; i < ARRAY_SIZE(_smmu_reg_name_map); i++) {
1766                 attr_name(_smmu_reg_name_map[i].dev_attr) =
1767                         _smmu_reg_name_map[i].name;
1768                 _smmu_attrs[i] = &_smmu_reg_name_map[i].dev_attr.attr;
1769         }
1770         for (j = 0; j < ARRAY_SIZE(_attr_values); j++)
1771                 _smmu_attrs[i++] = &_attr_values[j].attr;
1772         _smmu_attrs[i++] = &_attr_show_smmu.attr;
1773         _smmu_attrs[i] = NULL;
1774
1775         dev_set_name(&smmu->sysfs_dev, "smmu");
1776         smmu->sysfs_dev.parent = parent;
1777         smmu->sysfs_dev.driver = NULL;
1778         smmu->sysfs_dev.release = NULL;
1779         if (device_register(&smmu->sysfs_dev)) {
1780                 pr_err("%s: failed to register smmu_sysfs_dev\n", __func__);
1781                 smmu->sysfs_use_count--;
1782                 return;
1783         }
1784         if (sysfs_create_group(&smmu->sysfs_dev.kobj, &_smmu_attr_group)) {
1785                 pr_err("%s: failed to create group for smmu_sysfs_dev\n",
1786                         __func__);
1787                 smmu->sysfs_use_count--;
1788                 return;
1789         }
1790 }
1791
/* Per-AS hook: the sysfs tree is per-device, so just forward to the
 * ref-counted device-level registration. */
static void _sysfs_create(struct smmu_as *as, struct device *parent)
{
	_sysfs_smmu(as->smmu, parent);
}