2 * arch/arm/mach-tegra/iovmm-smmu.c
4 * Tegra I/O VMM implementation for SMMU devices for Tegra 3 series
7 * Copyright (c) 2010-2012, NVIDIA CORPORATION. All rights reserved.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
14 * This program is distributed in the hope that it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
24 #include <linux/module.h>
25 #include <linux/platform_device.h>
26 #include <linux/spinlock.h>
27 #include <linux/slab.h>
28 #include <linux/vmalloc.h>
30 #include <linux/pagemap.h>
31 #include <linux/sysfs.h>
32 #include <linux/device.h>
33 #include <linux/sched.h>
35 #include <linux/random.h>
36 #include <linux/ctype.h>
37 #include <linux/debugfs.h>
38 #include <linux/seq_file.h>
41 #include <asm/cacheflush.h>
43 #include <mach/iovmm.h>
44 #include <mach/iomap.h>
45 #include <mach/tegra_smmu.h>
48 * Macros without __ copied from armc.h
50 #define MC_INTSTATUS_0 0x0
51 #define MC_ERR_STATUS_0 0x8
52 #define MC_ERR_ADR_0 0xc
54 #define MC_SMMU_CONFIG_0 0x10
55 #define MC_SMMU_CONFIG_0_SMMU_ENABLE_DISABLE 0
56 #define MC_SMMU_CONFIG_0_SMMU_ENABLE_ENABLE 1
58 #define MC_SMMU_TLB_CONFIG_0 0x14
59 #define MC_SMMU_TLB_CONFIG_0_TLB_STATS_ENABLE__MASK (1 << 31)
60 #define MC_SMMU_TLB_CONFIG_0_TLB_STATS_ENABLE (1 << 31)
61 #define MC_SMMU_TLB_CONFIG_0_TLB_STATS_TEST__MASK (1 << 30)
62 #define MC_SMMU_TLB_CONFIG_0_TLB_STATS_TEST (1 << 30)
63 #ifdef CONFIG_ARCH_TEGRA_3x_SOC
64 #define MC_SMMU_TLB_CONFIG_0_TLB_ACTIVE_LINES__VALUE 0x10
65 #define MC_SMMU_TLB_CONFIG_0_RESET_VAL 0x20000010
67 #define MC_SMMU_TLB_CONFIG_0_TLB_ACTIVE_LINES__VALUE 0x20
68 #define MC_SMMU_TLB_CONFIG_0_RESET_VAL 0x20000020
71 #define MC_SMMU_PTC_CONFIG_0 0x18
72 #define MC_SMMU_PTC_CONFIG_0_PTC_STATS_ENABLE__MASK (1 << 31)
73 #define MC_SMMU_PTC_CONFIG_0_PTC_STATS_ENABLE (1 << 31)
74 #define MC_SMMU_PTC_CONFIG_0_PTC_STATS_TEST__MASK (1 << 30)
75 #define MC_SMMU_PTC_CONFIG_0_PTC_STATS_TEST (1 << 30)
76 #define MC_SMMU_PTC_CONFIG_0_PTC_INDEX_MAP__PATTERN 0x3f
77 #define MC_SMMU_PTC_CONFIG_0_RESET_VAL 0x2000003f
79 #define MC_SMMU_STATS_CONFIG_MASK \
80 MC_SMMU_PTC_CONFIG_0_PTC_STATS_ENABLE__MASK
81 #define MC_SMMU_STATS_CONFIG_ENABLE \
82 MC_SMMU_PTC_CONFIG_0_PTC_STATS_ENABLE
83 #define MC_SMMU_STATS_CONFIG_TEST \
84 MC_SMMU_PTC_CONFIG_0_PTC_STATS_TEST
86 #define MC_SMMU_PTB_ASID_0 0x1c
87 #define MC_SMMU_PTB_ASID_0_CURRENT_ASID_SHIFT 0
89 #define MC_SMMU_PTB_DATA_0 0x20
90 #define MC_SMMU_PTB_DATA_0_RESET_VAL 0
91 #define MC_SMMU_PTB_DATA_0_ASID_NONSECURE_SHIFT 29
92 #define MC_SMMU_PTB_DATA_0_ASID_WRITABLE_SHIFT 30
93 #define MC_SMMU_PTB_DATA_0_ASID_READABLE_SHIFT 31
95 #define MC_SMMU_TLB_FLUSH_0 0x30
96 #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_MATCH_ALL 0
97 #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_MATCH_SECTION 2
98 #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_MATCH_GROUP 3
99 #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_SHIFT 29
100 #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_DISABLE 0
101 #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_ENABLE 1
102 #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_SHIFT 31
104 #define MC_SMMU_PTC_FLUSH_0 0x34
105 #define MC_SMMU_PTC_FLUSH_0_PTC_FLUSH_TYPE_ALL 0
106 #define MC_SMMU_PTC_FLUSH_0_PTC_FLUSH_TYPE_ADR 1
107 #define MC_SMMU_PTC_FLUSH_0_PTC_FLUSH_ADR_SHIFT 4
109 #define MC_SMMU_ASID_SECURITY_0 0x38
110 #define MC_EMEM_CFG_0 0x50
111 #define MC_SECURITY_CFG0_0 0x70
112 #define MC_SECURITY_CFG1_0 0x74
113 #define MC_SECURITY_CFG2_0 0x78
114 #define MC_SECURITY_RSV_0 0x7c
116 #define MC_SMMU_STATS_TLB_HIT_COUNT_0 0x1f0
117 #define MC_SMMU_STATS_TLB_MISS_COUNT_0 0x1f4
118 #define MC_SMMU_STATS_PTC_HIT_COUNT_0 0x1f8
119 #define MC_SMMU_STATS_PTC_MISS_COUNT_0 0x1fc
121 #define MC_SMMU_TRANSLATION_ENABLE_0_0 0x228
122 #define MC_SMMU_TRANSLATION_ENABLE_1_0 0x22c
123 #define MC_SMMU_TRANSLATION_ENABLE_2_0 0x230
125 #define MC_SMMU_AFI_ASID_0 0x238 /* PCIE (T30) */
126 #define MC_SMMU_AVPC_ASID_0 0x23c /* AVP */
127 #define MC_SMMU_DC_ASID_0 0x240 /* Display controller */
128 #define MC_SMMU_DCB_ASID_0 0x244 /* Display controller B */
129 #define MC_SMMU_EPP_ASID_0 0x248 /* Encoder pre-processor */
130 #define MC_SMMU_G2_ASID_0 0x24c /* 2D engine */
131 #define MC_SMMU_HC_ASID_0 0x250 /* Host1x */
132 #define MC_SMMU_HDA_ASID_0 0x254 /* High-def audio */
133 #define MC_SMMU_ISP_ASID_0 0x258 /* Image signal processor */
134 #define MC_SMMU_MPE_ASID_0 0x264 /* MPEG encoder (T30) */
135 #define MC_SMMU_MSENC_ASID_0 0x264 /* MPEG encoder (T11x) */
136 #define MC_SMMU_NV_ASID_0 0x268 /* 3D */
137 #define MC_SMMU_NV2_ASID_0 0x26c /* 3D secondary (T30) */
138 #define MC_SMMU_PPCS_ASID_0 0x270 /* AHB */
139 #define MC_SMMU_SATA_ASID_0 0x278 /* SATA (T30) */
140 #define MC_SMMU_VDE_ASID_0 0x27c /* Video decoder */
141 #define MC_SMMU_VI_ASID_0 0x280 /* Video input */
142 #define MC_SMMU_XUSB_HOST_ASID_0 0x288 /* USB host (T11x) */
143 #define MC_SMMU_XUSB_DEV_ASID_0 0x28c /* USB dev (T11x) */
144 #define MC_SMMU_TSEC_ASID_0 0x294 /* TSEC (T11x) */
145 #define MC_SMMU_PPCS1_ASID_0 0x298 /* AHB secondary (T11x) */
150 #define MC_STAT_CONTROL_0 0x100
151 #define MC_STAT_EMC_CLOCKS_0 0x110
152 #define MC_STAT_EMC_FILTER_SET0_ADR_LIMIT_LO_0 0x118
153 #define MC_STAT_EMC_FILTER_SET0_ADR_LIMIT_HI_0 0x11c
154 #define MC_STAT_EMC_FILTER_SET0_CLIENT_0_0 0x128
155 #define MC_STAT_EMC_FILTER_SET0_CLIENT_1_0 0x12c
156 #define MC_STAT_EMC_FILTER_SET0_CLIENT_2_0 0x130
157 #define MC_STAT_EMC_SET0_COUNT_0 0x138
158 #define MC_STAT_EMC_SET0_COUNT_MSBS_0 0x13c
159 #define MC_STAT_EMC_FILTER_SET1_ADR_LIMIT_LO_0 0x158
160 #define MC_STAT_EMC_FILTER_SET1_ADR_LIMIT_HI_0 0x15c
161 #define MC_STAT_EMC_FILTER_SET1_CLIENT_0_0 0x168
162 #define MC_STAT_EMC_FILTER_SET1_CLIENT_1_0 0x16c
163 #define MC_STAT_EMC_FILTER_SET1_CLIENT_2_0 0x170
164 #define MC_STAT_EMC_SET1_COUNT_0 0x178
165 #define MC_STAT_EMC_SET1_COUNT_MSBS_0 0x17c
166 #define MC_STAT_EMC_FILTER_SET0_VIRTUAL_ADR_LIMIT_LO_0 0x198
167 #define MC_STAT_EMC_FILTER_SET0_VIRTUAL_ADR_LIMIT_HI_0 0x19c
168 #define MC_STAT_EMC_FILTER_SET0_ASID_0 0x1a0
169 #define MC_STAT_EMC_FILTER_SET1_VIRTUAL_ADR_LIMIT_LO_0 0x1a8
170 #define MC_STAT_EMC_FILTER_SET1_VIRTUAL_ADR_LIMIT_HI_0 0x1ac
171 #define MC_STAT_EMC_FILTER_SET1_ASID_0 0x1b0
174 * Copied from arahb_arbc.h
176 #ifndef CONFIG_ARCH_TEGRA_3x_SOC
177 #define AHB_MASTER_SWID_0 0x18
179 #define AHB_ARBITRATION_XBAR_CTRL_0 0xe0
180 #define AHB_ARBITRATION_XBAR_CTRL_0_SMMU_INIT_DONE_DONE 1
181 #define AHB_ARBITRATION_XBAR_CTRL_0_SMMU_INIT_DONE_SHIFT 17
184 * Copied from arapbdma.h
186 #ifndef CONFIG_ARCH_TEGRA_3x_SOC
187 #define APBDMA_CHANNEL_SWID_0 0x3c
190 #define MC_SMMU_NUM_ASIDS 4
191 #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_SECTION__MASK 0xffc00000
192 #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_SECTION__SHIFT 12 /* right shift */
193 #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_GROUP__MASK 0xffffc000
194 #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_GROUP__SHIFT 12 /* right shift */
195 #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA(iova, which) \
196 ((((iova) & MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_##which##__MASK) >> \
197 MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_##which##__SHIFT) | \
198 MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_MATCH_##which)
199 #define MC_SMMU_PTB_ASID_0_CURRENT_ASID(n) \
200 ((n) << MC_SMMU_PTB_ASID_0_CURRENT_ASID_SHIFT)
201 #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_disable \
202 (MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_DISABLE << \
203 MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_SHIFT)
204 #define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH__ENABLE \
205 (MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_ENABLE << \
206 MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_SHIFT)
208 #define VMM_NAME "iovmm-smmu"
209 #define DRIVER_NAME "tegra_smmu"
211 #define SMMU_PAGE_SHIFT 12
212 #define SMMU_PAGE_SIZE (1 << SMMU_PAGE_SHIFT)
214 #define SMMU_PDIR_COUNT 1024
215 #define SMMU_PDIR_SIZE (sizeof(unsigned long) * SMMU_PDIR_COUNT)
216 #define SMMU_PTBL_COUNT 1024
217 #define SMMU_PTBL_SIZE (sizeof(unsigned long) * SMMU_PTBL_COUNT)
218 #define SMMU_PDIR_SHIFT 12
219 #define SMMU_PDE_SHIFT 12
220 #define SMMU_PTE_SHIFT 12
221 #define SMMU_PFN_MASK 0x000fffff
223 #define SMMU_PDE_NEXT_SHIFT 28
225 #define SMMU_ADDR_TO_PFN(addr) ((addr) >> 12)
226 #define SMMU_ADDR_TO_PDN(addr) ((addr) >> 22)
227 #define SMMU_PDN_TO_ADDR(pdn) ((pdn) << 22)
229 #define _READABLE (1 << MC_SMMU_PTB_DATA_0_ASID_READABLE_SHIFT)
230 #define _WRITABLE (1 << MC_SMMU_PTB_DATA_0_ASID_WRITABLE_SHIFT)
231 #define _NONSECURE (1 << MC_SMMU_PTB_DATA_0_ASID_NONSECURE_SHIFT)
232 #define _PDE_NEXT (1 << SMMU_PDE_NEXT_SHIFT)
233 #define _MASK_ATTR (_READABLE | _WRITABLE | _NONSECURE)
235 #define _PDIR_ATTR (_READABLE | _WRITABLE | _NONSECURE)
237 #define _PDE_ATTR (_READABLE | _WRITABLE | _NONSECURE)
238 #define _PDE_ATTR_N (_PDE_ATTR | _PDE_NEXT)
239 #define _PDE_VACANT(pdn) (((pdn) << 10) | _PDE_ATTR)
241 #define _PTE_ATTR (_READABLE | _WRITABLE | _NONSECURE)
242 #define _PTE_VACANT(addr) (((addr) >> SMMU_PAGE_SHIFT) | _PTE_ATTR)
244 #define SMMU_MK_PDIR(page, attr) \
245 ((page_to_phys(page) >> SMMU_PDIR_SHIFT) | (attr))
246 #define SMMU_MK_PDE(page, attr) \
247 (unsigned long)((page_to_phys(page) >> SMMU_PDE_SHIFT) | (attr))
248 #define SMMU_EX_PTBL_PAGE(pde) \
249 pfn_to_page((unsigned long)(pde) & SMMU_PFN_MASK)
250 #define SMMU_PFN_TO_PTE(pfn, attr) (unsigned long)((pfn) | (attr))
252 #define SMMU_ASID_ENABLE(asid) ((asid) | (1 << 31))
253 #define SMMU_ASID_DISABLE 0
254 #define SMMU_ASID_ASID(n) ((n) & ~SMMU_ASID_ENABLE(0))
256 #ifdef CONFIG_ARCH_TEGRA_3x_SOC
276 #ifdef CONFIG_ARCH_TEGRA_11x_SOC
297 /* Keep this as a "natural" enumeration (no assignments) */
299 #define op(c) HWC_##c,
305 struct smmu_hwc_state {
307 unsigned long enable_disable;
310 /* Hardware client mapping initializer */
311 #define HWC_INIT(client) \
312 [HWC_##client] = {MC_SMMU_##client##_ASID_0, SMMU_ASID_DISABLE},
314 static const struct smmu_hwc_state smmu_hwc_state_init[] = {
315 #define op(c) HWC_INIT(c)
321 struct domain_hwc_map {
322 const char *dev_name;
323 const enum smmu_hwclient *hwcs;
324 const unsigned int nr_hwcs;
327 /* Enable all hardware clients for SMMU translation */
328 static const enum smmu_hwclient nvmap_hwcs[] = {
329 #define op(c) HWC_##c,
334 static const struct domain_hwc_map smmu_hwc_map[] = {
338 .nr_hwcs = ARRAY_SIZE(nvmap_hwcs),
346 struct smmu_device *smmu; /* back pointer to container */
348 const struct domain_hwc_map *hwclients;
349 struct mutex lock; /* for pagetable */
350 struct tegra_iovmm_domain domain;
351 struct page *pdir_page;
352 unsigned long pdir_attr;
353 unsigned long pde_attr;
354 unsigned long pte_attr;
355 unsigned int *pte_count;
356 struct device sysfs_dev;
361 * Register bank index
365 #ifdef TEGRA_MC0_BASE
368 #ifdef TEGRA_MC1_BASE
376 static const struct {
379 } tegra_reg[_REGS] = {
380 [_MC] = {TEGRA_MC_BASE, TEGRA_MC_SIZE},
381 #ifdef TEGRA_MC0_BASE
382 [_MC0] = {TEGRA_MC0_BASE, TEGRA_MC0_SIZE},
384 #ifdef TEGRA_MC1_BASE
385 [_MC1] = {TEGRA_MC1_BASE, TEGRA_MC1_SIZE},
387 [_AHBARB] = {TEGRA_AHB_ARB_BASE, TEGRA_AHB_ARB_SIZE},
388 [_APBDMA] = {TEGRA_APB_DMA_BASE, TEGRA_APB_DMA_SIZE},
392 * Aliases for register bank base addres holders (remapped)
394 #define regs_mc regs[_MC]
395 #define regs_mc0 regs[_MC0]
396 #define regs_mc1 regs[_MC1]
397 #define regs_ahbarb regs[_AHBARB]
398 #define regs_apbdma regs[_APBDMA]
404 void __iomem *regs[_REGS];
405 tegra_iovmm_addr_t iovmm_base; /* remappable base address */
406 unsigned long page_count; /* total remappable size */
409 struct tegra_iovmm_device iovmm_dev;
411 struct smmu_as *as; /* Run-time allocated array */
412 struct smmu_hwc_state hwc_state[HWC_COUNT];
413 struct device sysfs_dev;
416 #ifdef CONFIG_ARCH_TEGRA_3x_SOC
417 struct page *avp_vector_page; /* dummy page shared by all AS's */
420 * Register image savers for suspend/resume
422 unsigned long translation_enable_0_0;
423 unsigned long translation_enable_1_0;
424 unsigned long translation_enable_2_0;
425 unsigned long asid_security_0;
427 unsigned long lowest_asid; /* Variables for hardware testing */
428 unsigned long debug_asid;
429 unsigned long signature_pid; /* For debugging aid */
430 unsigned long challenge_code; /* For debugging aid */
431 unsigned long challenge_pid; /* For debugging aid */
434 struct dentry *debugfs_root;
437 #define VA_PAGE_TO_PA(va, page) \
438 (page_to_phys(page) + ((unsigned long)(va) & ~PAGE_MASK))
/*
 * Write back the CPU data cache for [va, va+size): inner cache via
 * __cpuc_flush_dcache_area(), outer cache (e.g. PL310 L2) via
 * outer_flush_range() on the corresponding physical range, so that
 * in-memory page-table updates become visible to the SMMU walker.
 * NOTE(review): excerpt is missing the return-type line and braces.
 */
441 flush_cpu_dcache(void *va, struct page *page, size_t size)
443 unsigned long _pa_ = VA_PAGE_TO_PA((unsigned long)va, page);
444 __cpuc_flush_dcache_area((void *)(va), (size_t)(size));
445 outer_flush_range(_pa_, _pa_+(size_t)(size));
449 * Any interaction between any block on PPSB and a block on APB or AHB
450 * must have these read-back to ensure the APB/AHB bus transaction is
451 * complete before initiating activity on the PPSB block.
/*
 * Read back MC_SMMU_CONFIG_0 to force completion of posted APB/AHB
 * register writes before the PPSB-side hardware proceeds (see the
 * PPSB/APB ordering comment above). The value itself is discarded.
 */
453 static inline void flush_smmu_regs(struct smmu_device *smmu)
455 (void)readl((smmu)->regs_mc + MC_SMMU_CONFIG_0);
459 * Flush all TLB entries and all PTC entries
460 * Caller must lock smmu
/*
 * Flush the whole page-table cache (PTC) and then the whole TLB
 * (VA-match-all, ASID match disabled => all ASIDs). When @enable is
 * set, also (re)enable SMMU translation via MC_SMMU_CONFIG_0.
 * Caller must hold the smmu lock (per the comment above).
 */
462 static void smmu_flush_regs(struct smmu_device *smmu, int enable)
464 writel(MC_SMMU_PTC_FLUSH_0_PTC_FLUSH_TYPE_ALL,
465 smmu->regs_mc + MC_SMMU_PTC_FLUSH_0);
466 flush_smmu_regs(smmu);
467 writel(MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_MATCH_ALL |
468 MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_disable,
469 smmu->regs_mc + MC_SMMU_TLB_FLUSH_0);
/* presumably the enable write below is guarded by `if (enable)` — guard line missing from excerpt */
472 writel(MC_SMMU_CONFIG_0_SMMU_ENABLE_ENABLE,
473 smmu->regs_mc + MC_SMMU_CONFIG_0);
475 flush_smmu_regs(smmu);
/*
 * Program (or restore, on resume) the full SMMU register state:
 * per-ASID page directory pointers, per-hardware-client ASID enables,
 * translation-enable and ASID-security images, TLB/PTC config reset
 * values, then flush caches and signal SMMU init-done to the AHB
 * arbiter. Caller must hold smmu->lock.
 */
478 static void smmu_setup_regs(struct smmu_device *smmu)
485 /* Set/restore page directory for each AS */
486 for (asid = 0; asid < smmu->num_ases; asid++) {
487 struct smmu_as *as = &smmu->as[asid];
/* Select the ASID, then write its PDIR (or the reset value if the AS
 * has no page directory — the `as->pdir_page ?` line is missing from
 * this excerpt). */
489 writel(MC_SMMU_PTB_ASID_0_CURRENT_ASID(as->asid),
490 smmu->regs_mc + MC_SMMU_PTB_ASID_0);
492 ? SMMU_MK_PDIR(as->pdir_page, as->pdir_attr)
493 : MC_SMMU_PTB_DATA_0_RESET_VAL,
494 smmu->regs_mc + MC_SMMU_PTB_DATA_0);
498 /* Set/restore ASID for each hardware client */
499 for (i = 0; i < HWC_COUNT; i++) {
500 struct smmu_hwc_state *hwcst = &smmu->hwc_state[i];
501 writel(hwcst->enable_disable, smmu->regs_mc + hwcst->reg);
/* Restore saved (or initial) translation-enable / security images. */
504 writel(smmu->translation_enable_0_0,
505 smmu->regs_mc + MC_SMMU_TRANSLATION_ENABLE_0_0);
506 writel(smmu->translation_enable_1_0,
507 smmu->regs_mc + MC_SMMU_TRANSLATION_ENABLE_1_0);
508 writel(smmu->translation_enable_2_0,
509 smmu->regs_mc + MC_SMMU_TRANSLATION_ENABLE_2_0);
510 writel(smmu->asid_security_0,
511 smmu->regs_mc + MC_SMMU_ASID_SECURITY_0);
512 writel(MC_SMMU_TLB_CONFIG_0_RESET_VAL,
513 smmu->regs_mc + MC_SMMU_TLB_CONFIG_0);
514 writel(MC_SMMU_PTC_CONFIG_0_RESET_VAL,
515 smmu->regs_mc + MC_SMMU_PTC_CONFIG_0);
/* Flush PTC+TLB and enable translation (enable=1). */
517 smmu_flush_regs(smmu, 1);
/* Tell the AHB arbiter that SMMU init is done (read-modify-write of
 * XBAR_CTRL; the leading writel( line is missing from this excerpt). */
519 readl(smmu->regs_ahbarb + AHB_ARBITRATION_XBAR_CTRL_0) |
520 (AHB_ARBITRATION_XBAR_CTRL_0_SMMU_INIT_DONE_DONE <<
521 AHB_ARBITRATION_XBAR_CTRL_0_SMMU_INIT_DONE_SHIFT),
522 smmu->regs_ahbarb + AHB_ARBITRATION_XBAR_CTRL_0);
/*
 * Suspend hook: snapshot the translation-enable and ASID-security
 * registers into the smmu_device so smmu_resume()/smmu_setup_regs()
 * can restore them after the MC loses state across suspend.
 */
525 static int smmu_suspend(struct tegra_iovmm_device *dev)
527 struct smmu_device *smmu =
528 container_of(dev, struct smmu_device, iovmm_dev);
530 smmu->translation_enable_0_0 =
531 readl(smmu->regs_mc + MC_SMMU_TRANSLATION_ENABLE_0_0);
532 smmu->translation_enable_1_0 =
533 readl(smmu->regs_mc + MC_SMMU_TRANSLATION_ENABLE_1_0);
534 smmu->translation_enable_2_0 =
535 readl(smmu->regs_mc + MC_SMMU_TRANSLATION_ENABLE_2_0);
536 smmu->asid_security_0 =
537 readl(smmu->regs_mc + MC_SMMU_ASID_SECURITY_0);
/*
 * Resume hook: reprogram all SMMU registers from the saved images
 * under smmu->lock (smmu_setup_regs requires the lock held).
 */
541 static void smmu_resume(struct tegra_iovmm_device *dev)
543 struct smmu_device *smmu =
544 container_of(dev, struct smmu_device, iovmm_dev);
549 spin_lock(&smmu->lock);
550 smmu_setup_regs(smmu);
551 spin_unlock(&smmu->lock);
/*
 * Invalidate the cached copy of a single page-table entry:
 * PTC flush by physical address of @pte, then a TLB flush matched
 * to this AS's ASID, at SECTION granularity when @is_pde (a page
 * directory entry changed) or GROUP granularity for a leaf PTE.
 */
554 static void flush_ptc_and_tlb(struct smmu_device *smmu,
555 struct smmu_as *as, unsigned long iova,
556 unsigned long *pte, struct page *ptpage, int is_pde)
558 unsigned long tlb_flush_va = is_pde
559 ? MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA(iova, SECTION)
560 : MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA(iova, GROUP)
562 writel(MC_SMMU_PTC_FLUSH_0_PTC_FLUSH_TYPE_ADR |
563 VA_PAGE_TO_PA(pte, ptpage),
564 smmu->regs_mc + MC_SMMU_PTC_FLUSH_0);
565 flush_smmu_regs(smmu);
566 writel(tlb_flush_va |
567 MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH__ENABLE |
568 (as->asid << MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_SHIFT),
569 smmu->regs_mc + MC_SMMU_TLB_FLUSH_0);
570 flush_smmu_regs(smmu);
/*
 * Free the second-level page table covering @iova, if one exists:
 * release the PTBL page, mark the PDE vacant again, then flush the
 * dcache line holding the PDE and the PTC/TLB (is_pde=1, so SECTION
 * granularity — the trailing argument line is missing from this
 * excerpt). Caller is expected to hold as->lock.
 */
573 static void free_ptbl(struct smmu_as *as, unsigned long iova)
575 unsigned long pdn = SMMU_ADDR_TO_PDN(iova);
576 unsigned long *pdir = (unsigned long *)kmap(as->pdir_page);
578 if (pdir[pdn] != _PDE_VACANT(pdn)) {
579 pr_debug("%s:%d pdn=%lx\n", __func__, __LINE__, pdn);
581 ClearPageReserved(SMMU_EX_PTBL_PAGE(pdir[pdn]));
582 __free_page(SMMU_EX_PTBL_PAGE(pdir[pdn]));
583 pdir[pdn] = _PDE_VACANT(pdn);
584 flush_cpu_dcache(&pdir[pdn], as->pdir_page, sizeof pdir[pdn]);
585 flush_ptc_and_tlb(as->smmu, as, iova, &pdir[pdn],
588 kunmap(as->pdir_page);
/*
 * Tear down an AS's page directory: walk the whole IOVMM window in
 * 4 MB (one-PTBL) steps freeing each page table (the free_ptbl()
 * call inside the loop is missing from this excerpt), then release
 * the PDIR page itself and the PTE-usage counter array.
 */
591 static void free_pdir(struct smmu_as *as)
594 unsigned addr = as->smmu->iovmm_base;
595 int count = as->smmu->page_count;
597 while (count-- > 0) {
599 addr += SMMU_PAGE_SIZE * SMMU_PTBL_COUNT;
601 ClearPageReserved(as->pdir_page);
602 __free_page(as->pdir_page);
603 as->pdir_page = NULL;
604 kfree(as->pte_count);
605 as->pte_count = NULL;
609 static const char * const smmu_debugfs_mc[] = {
611 #ifdef TEGRA_MC0_BASE
614 #ifdef TEGRA_MC1_BASE
619 static const char * const smmu_debugfs_cache[] = { "tlb", "ptc", };
/*
 * debugfs write handler for .../smmu/<mc>/<tlb|ptc>: accepts the
 * commands "off", "on" or "reset" and updates the STATS_ENABLE /
 * STATS_TEST bits of the corresponding TLB/PTC config register.
 * The cache index comes from the file's inode->i_private and the
 * MC index from the parent dentry's i_private.
 */
621 static ssize_t smmu_debugfs_stats_write(struct file *file,
622 const char __user *buffer,
623 size_t count, loff_t *pos)
626 struct dentry *cache, *mc, *root;
627 struct smmu_device *smmu;
628 int mc_idx, cache_idx, i;
630 const char * const smmu_debugfs_stats_ctl[] = { "off", "on", "reset"};
631 char str[] = "reset";
/* Copy at most sizeof(str) bytes of the user command. */
633 count = min_t(size_t, count, sizeof(str));
634 if (copy_from_user(str, buffer, count))
/* Match the command by prefix against the control table. */
637 for (i = 0; i < ARRAY_SIZE(smmu_debugfs_stats_ctl); i++)
638 if (strncmp(str, smmu_debugfs_stats_ctl[i],
639 strlen(smmu_debugfs_stats_ctl[i])) == 0)
642 if (i == ARRAY_SIZE(smmu_debugfs_stats_ctl))
/* Recover smmu/mc/cache context from the debugfs dentry chain. */
645 cache = file->f_dentry;
646 inode = cache->d_inode;
647 cache_idx = (int)inode->i_private;
648 mc = cache->d_parent;
649 mc_idx = (int)mc->d_inode->i_private;
651 smmu = root->d_inode->i_private;
/* Compute the register offset: TLB_CONFIG is followed by PTC_CONFIG,
 * and each extra MC instance adds a 2-register stride. */
653 offs = MC_SMMU_TLB_CONFIG_0;
654 offs += sizeof(u32) * cache_idx;
655 offs += 2 * sizeof(u32) * ARRAY_SIZE(smmu_debugfs_cache) * mc_idx;
657 val = readl(smmu->regs + offs);
/* "off": clear both ENABLE and TEST bits (switch labels missing
 * from this excerpt). */
660 val &= ~MC_SMMU_STATS_CONFIG_ENABLE;
661 val &= ~MC_SMMU_STATS_CONFIG_TEST;
662 writel(val, smmu->regs + offs);
/* "on": set ENABLE, clear TEST. */
665 val |= MC_SMMU_STATS_CONFIG_ENABLE;
666 val &= ~MC_SMMU_STATS_CONFIG_TEST;
667 writel(val, smmu->regs + offs);
/* "reset": pulse TEST high then low to zero the counters. */
670 val |= MC_SMMU_STATS_CONFIG_TEST;
671 writel(val, smmu->regs + offs);
672 val &= ~MC_SMMU_STATS_CONFIG_TEST;
673 writel(val, smmu->regs + offs);
680 pr_debug("%s() %08x, %08x @%08x\n", __func__,
681 val, readl(smmu->regs + offs), offs);
/*
 * seq_file show handler: print "hit:%08x miss:%08x" for the TLB or
 * PTC statistics counters selected by the debugfs path. Offsets are
 * computed from MC_SMMU_STATS_TLB_HIT_COUNT_0 using the cache index
 * (tlb/ptc) and MC instance index stored in dentry i_private fields.
 */
686 static int smmu_debugfs_stats_show(struct seq_file *s, void *v)
689 struct dentry *cache, *mc, *root;
690 struct smmu_device *smmu;
691 int mc_idx, cache_idx, i;
693 const char * const smmu_debugfs_stats[] = { "hit", "miss", };
697 cache = d_find_alias(inode);
698 cache_idx = (int)inode->i_private;
699 mc = cache->d_parent;
700 mc_idx = (int)mc->d_inode->i_private;
702 smmu = root->d_inode->i_private;
704 offs = MC_SMMU_STATS_TLB_HIT_COUNT_0;
705 offs += ARRAY_SIZE(smmu_debugfs_stats) * sizeof(u32) * cache_idx;
706 offs += ARRAY_SIZE(smmu_debugfs_stats) * sizeof(u32) *
707 ARRAY_SIZE(smmu_debugfs_cache) * mc_idx;
/* Read and emit each counter (hit, then miss). */
709 for (i = 0; i < ARRAY_SIZE(smmu_debugfs_stats); i++) {
712 offs += sizeof(u32) * i;
713 val = readl(smmu->regs + offs);
715 seq_printf(s, "%s:%08x ", smmu_debugfs_stats[i], val);
717 pr_debug("%s() %s %08x @%08x\n", __func__,
718 smmu_debugfs_stats[i], val, offs);
/* debugfs open: route reads through smmu_debugfs_stats_show(),
 * passing the inode so the show handler can recover its context. */
725 static int smmu_debugfs_stats_open(struct inode *inode, struct file *file)
727 return single_open(file, smmu_debugfs_stats_show, inode);
/* File operations for the per-cache (tlb/ptc) debugfs stats files:
 * read shows the counters, write accepts off/on/reset commands. */
730 static const struct file_operations smmu_debugfs_stats_fops = {
731 .open = smmu_debugfs_stats_open,
734 .release = single_release,
735 .write = smmu_debugfs_stats_write,
/* Remove the whole "smmu" debugfs subtree (safe if root is NULL). */
738 static void smmu_debugfs_delete(struct smmu_device *smmu)
740 debugfs_remove_recursive(smmu->debugfs_root);
/*
 * Build the debugfs tree: smmu/<mc-instance>/<tlb|ptc>. Directory
 * nodes are created as S_IFDIR "files" carrying the MC index in
 * i_private; leaf files use smmu_debugfs_stats_fops. On any creation
 * failure the partial tree is deleted (error path at the bottom).
 */
743 static void smmu_debugfs_create(struct smmu_device *smmu)
748 root = debugfs_create_file("smmu",
749 S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
753 smmu->debugfs_root = root;
755 for (i = 0; i < ARRAY_SIZE(smmu_debugfs_mc); i++) {
759 mc = debugfs_create_file(smmu_debugfs_mc[i],
760 S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
761 root, (void *)i, NULL);
765 for (j = 0; j < ARRAY_SIZE(smmu_debugfs_cache); j++) {
766 struct dentry *cache;
768 cache = debugfs_create_file(smmu_debugfs_cache[j],
769 S_IWUGO | S_IRUGO, mc,
771 &smmu_debugfs_stats_fops);
/* Error path: undo any debugfs entries created so far. */
780 smmu_debugfs_delete(smmu);
/*
 * Driver removal: tear down debugfs, disable SMMU translation,
 * free every AS's page directory, free the AVP vector page (T30),
 * unregister from the IOVMM core and unmap all register banks.
 */
783 static int smmu_remove(struct platform_device *pdev)
785 struct smmu_device *smmu = platform_get_drvdata(pdev);
791 smmu_debugfs_delete(smmu);
/* Turn translation off before freeing the page tables it uses. */
794 writel(MC_SMMU_CONFIG_0_SMMU_ENABLE_DISABLE,
795 smmu->regs_mc + MC_SMMU_CONFIG_0);
798 platform_set_drvdata(pdev, NULL);
803 for (asid = 0; asid < smmu->num_ases; asid++)
804 free_pdir(&smmu->as[asid]);
808 #ifdef CONFIG_ARCH_TEGRA_3x_SOC
809 if (smmu->avp_vector_page)
810 __free_page(smmu->avp_vector_page);
812 tegra_iovmm_unregister(&smmu->iovmm_dev);
813 for (i = 0; i < _REGS; i++) {
815 iounmap(smmu->regs[i]);
816 smmu->regs[i] = NULL;
824 * Maps PTBL for given iova and returns the PTE address
825 * Caller must unmap the mapped PTBL returned in *ptbl_page_p
/*
 * Map the page table covering @iova and return a pointer to its PTE.
 * If the PDE is vacant and @allocate is set, a new PTBL page is
 * allocated (GFP_DMA), filled with vacant PTEs, installed in the
 * PDIR and flushed to the hardware. On success *ptbl_page_p is the
 * kmap'ed PTBL page (caller must kunmap it) and *pte_counter points
 * at the in-use counter for this PDE. Returns NULL on lookup miss
 * (allocate=false) or allocation failure — the NULL-return lines are
 * missing from this excerpt. Caller must hold as->lock.
 */
827 static unsigned long *locate_pte(struct smmu_as *as,
828 unsigned long iova, bool allocate,
829 struct page **ptbl_page_p,
830 unsigned int **pte_counter)
832 unsigned long ptn = SMMU_ADDR_TO_PFN(iova);
833 unsigned long pdn = SMMU_ADDR_TO_PDN(iova);
834 unsigned long *pdir = kmap(as->pdir_page);
837 if (pdir[pdn] != _PDE_VACANT(pdn)) {
838 /* Mapped entry table already exists */
839 *ptbl_page_p = SMMU_EX_PTBL_PAGE(pdir[pdn]);
840 ptbl = kmap(*ptbl_page_p);
841 } else if (!allocate) {
842 kunmap(as->pdir_page);
845 /* Vacant - allocate a new page table */
846 pr_debug("%s:%d new PTBL pdn=%lx\n", __func__, __LINE__, pdn);
848 *ptbl_page_p = alloc_page(GFP_KERNEL | __GFP_DMA);
850 kunmap(as->pdir_page);
852 ": failed to allocate tegra_iovmm_device page table\n");
855 SetPageReserved(*ptbl_page_p);
856 ptbl = (unsigned long *)kmap(*ptbl_page_p);
/* Initialize every slot of the new PTBL to its vacant value. */
859 unsigned long addr = SMMU_PDN_TO_ADDR(pdn);
860 for (pn = 0; pn < SMMU_PTBL_COUNT;
861 pn++, addr += SMMU_PAGE_SIZE) {
862 ptbl[pn] = _PTE_VACANT(addr);
865 flush_cpu_dcache(ptbl, *ptbl_page_p, SMMU_PTBL_SIZE);
/* Publish the new PTBL via the PDE (with _PDE_NEXT set), then make
 * the hardware caches forget the stale vacant PDE. */
866 pdir[pdn] = SMMU_MK_PDE(*ptbl_page_p,
867 as->pde_attr | _PDE_NEXT);
868 flush_cpu_dcache(&pdir[pdn], as->pdir_page, sizeof pdir[pdn]);
869 flush_ptc_and_tlb(as->smmu, as, iova, &pdir[pdn],
872 *pte_counter = &as->pte_count[pdn];
874 kunmap(as->pdir_page);
875 return &ptbl[ptn % SMMU_PTBL_COUNT];
/*
 * Debugging aid: when the current task's pid matches the configured
 * signature_pid, stamp the first two words of the mapped page with
 * identifying values (vaddr[0] assignment is missing from this
 * excerpt; vaddr[1] gets the physical address) and flush them, so
 * hardware-side reads can be correlated with the mapping.
 */
878 static void put_signature(struct smmu_as *as,
879 unsigned long addr, unsigned long pfn)
881 if (as->smmu->signature_pid == current->pid) {
882 struct page *page = pfn_to_page(pfn);
883 unsigned long *vaddr = kmap(page);
886 vaddr[1] = pfn << PAGE_SHIFT;
887 flush_cpu_dcache(vaddr, page, sizeof(vaddr[0]) * 2);
/*
 * Map an entire IOVM area: for each page, pin it resident via
 * iovma->ops->lock_makeresident(), install the PTE under as->lock,
 * flush the dcache line and the PTC/TLB. On failure the second half
 * of the function unwinds: releases pinned pages, re-vacates PTEs,
 * and frees page tables whose use counter drops to zero (the
 * free_ptbl call is among the lines missing from this excerpt).
 */
893 static int smmu_map(struct tegra_iovmm_domain *domain,
894 struct tegra_iovmm_area *iovma)
896 struct smmu_as *as = container_of(domain, struct smmu_as, domain);
897 unsigned long addr = iovma->iovm_start;
898 unsigned long pcount = iovma->iovm_length >> SMMU_PAGE_SHIFT;
901 pr_debug("%s:%d iova=%lx asid=%d\n", __func__, __LINE__,
902 addr, as - as->smmu->as);
904 for (i = 0; i < pcount; i++) {
907 unsigned int *pte_counter;
910 pfn = iovma->ops->lock_makeresident(iovma, i << PAGE_SHIFT);
914 mutex_lock(&as->lock);
916 pte = locate_pte(as, addr, true, &ptpage, &pte_counter);
920 pr_debug("%s:%d iova=%lx pfn=%lx asid=%d\n",
921 __func__, __LINE__, addr, pfn, as - as->smmu->as);
/* Bump the PTBL use counter only when going vacant -> mapped
 * (the increment line itself is missing from this excerpt). */
923 if (*pte == _PTE_VACANT(addr))
925 *pte = SMMU_PFN_TO_PTE(pfn, as->pte_attr);
926 if (unlikely((*pte == _PTE_VACANT(addr))))
928 flush_cpu_dcache(pte, ptpage, sizeof *pte);
929 flush_ptc_and_tlb(as->smmu, as, addr, pte, ptpage, 0);
931 mutex_unlock(&as->lock);
932 put_signature(as, addr, pfn);
933 addr += SMMU_PAGE_SIZE;
/* Error path: roll back all pages mapped so far, in reverse. */
938 mutex_lock(&as->lock);
942 unsigned int *pte_counter;
945 iovma->ops->release(iovma, i<<PAGE_SHIFT);
946 addr -= SMMU_PAGE_SIZE;
947 pte = locate_pte(as, addr, false, &page, &pte_counter);
949 if (*pte != _PTE_VACANT(addr)) {
950 *pte = _PTE_VACANT(addr);
951 flush_cpu_dcache(pte, page, sizeof *pte);
952 flush_ptc_and_tlb(as->smmu, as, addr, pte,
955 if (!--(*pte_counter))
962 mutex_unlock(&as->lock);
/*
 * Unmap an IOVM area page by page under as->lock: release each page
 * back to the area's ops, vacate its PTE, flush dcache + PTC/TLB.
 * When a page table's use counter reaches zero and @decommit is set,
 * the PTBL itself is freed (free_ptbl call missing from this
 * excerpt) followed by a full smmu_flush_regs(..., 0).
 */
966 static void smmu_unmap(struct tegra_iovmm_domain *domain,
967 struct tegra_iovmm_area *iovma, bool decommit)
969 struct smmu_as *as = container_of(domain, struct smmu_as, domain);
970 unsigned long addr = iovma->iovm_start;
971 unsigned int pcount = iovma->iovm_length >> SMMU_PAGE_SHIFT;
972 unsigned int i, *pte_counter;
974 pr_debug("%s:%d iova=%lx asid=%d\n", __func__, __LINE__,
975 addr, as - as->smmu->as);
977 mutex_lock(&as->lock);
978 for (i = 0; i < pcount; i++) {
982 if (iovma->ops && iovma->ops->release)
983 iovma->ops->release(iovma, i << PAGE_SHIFT);
985 pte = locate_pte(as, addr, false, &page, &pte_counter);
987 if (*pte != _PTE_VACANT(addr)) {
988 *pte = _PTE_VACANT(addr);
989 flush_cpu_dcache(pte, page, sizeof *pte);
990 flush_ptc_and_tlb(as->smmu, as, addr, pte,
993 if (!--(*pte_counter) && decommit) {
995 smmu_flush_regs(as->smmu, 0);
999 addr += SMMU_PAGE_SIZE;
1001 mutex_unlock(&as->lock);
/*
 * Map a single, already-resident pfn at @addr in the domain (used
 * e.g. for the AVP vector page). Same PTE-install / counter /
 * flush sequence as one iteration of smmu_map(), but with no
 * lock_makeresident and a BUG_ON for invalid pfns. The trailing
 * `unsigned long pfn)` parameter line is missing from this excerpt.
 */
1004 static void smmu_map_pfn(struct tegra_iovmm_domain *domain,
1005 struct tegra_iovmm_area *iovma, unsigned long addr,
1008 struct smmu_as *as = container_of(domain, struct smmu_as, domain);
1009 struct smmu_device *smmu = as->smmu;
1011 unsigned int *pte_counter;
1012 struct page *ptpage;
1014 pr_debug("%s:%d iova=%lx pfn=%lx asid=%d\n", __func__, __LINE__,
1015 addr, pfn, as - as->smmu->as);
1017 BUG_ON(!pfn_valid(pfn));
1018 mutex_lock(&as->lock);
1019 pte = locate_pte(as, addr, true, &ptpage, &pte_counter);
/* Counter increment when vacant -> mapped (increment line missing
 * from this excerpt, as in smmu_map). */
1021 if (*pte == _PTE_VACANT(addr))
1023 *pte = SMMU_PFN_TO_PTE(pfn, as->pte_attr);
1024 if (unlikely((*pte == _PTE_VACANT(addr))))
1026 flush_cpu_dcache(pte, ptpage, sizeof *pte);
1027 flush_ptc_and_tlb(smmu, as, addr, pte, ptpage, 0);
1029 put_signature(as, addr, pfn);
1031 mutex_unlock(&as->lock);
1035 * Caller must lock/unlock as
/*
 * Allocate an AS's page directory: the per-PDE use-counter array
 * (kzalloc) and the PDIR page itself (GFP_DMA), every PDE set
 * vacant, then dcache-flush, a PTC flush by PDIR address and an
 * ASID-matched TLB flush-all so the hardware starts clean.
 * Returns <0 on allocation failure (return lines missing from this
 * excerpt). Caller must hold as->lock per the comment above.
 */
1037 static int alloc_pdir(struct smmu_as *as)
1039 unsigned long *pdir;
1045 as->pte_count = kzalloc(sizeof(as->pte_count[0]) * SMMU_PDIR_COUNT,
1047 if (!as->pte_count) {
1049 ": failed to allocate tegra_iovmm_device PTE cunters\n");
1052 as->pdir_page = alloc_page(GFP_KERNEL | __GFP_DMA);
1053 if (!as->pdir_page) {
1055 ": failed to allocate tegra_iovmm_device page directory\n");
1056 kfree(as->pte_count);
1057 as->pte_count = NULL;
1060 SetPageReserved(as->pdir_page);
1061 pdir = kmap(as->pdir_page);
1063 for (pdn = 0; pdn < SMMU_PDIR_COUNT; pdn++)
1064 pdir[pdn] = _PDE_VACANT(pdn);
1065 flush_cpu_dcache(pdir, as->pdir_page, SMMU_PDIR_SIZE);
1066 writel(MC_SMMU_PTC_FLUSH_0_PTC_FLUSH_TYPE_ADR |
1067 VA_PAGE_TO_PA(pdir, as->pdir_page),
1068 as->smmu->regs_mc + MC_SMMU_PTC_FLUSH_0);
1069 flush_smmu_regs(as->smmu);
1070 writel(MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_MATCH_ALL |
1071 MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH__ENABLE |
1072 (as->asid << MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_SHIFT),
1073 as->smmu->regs_mc + MC_SMMU_TLB_FLUSH_0);
1074 flush_smmu_regs(as->smmu);
1075 kunmap(as->pdir_page);
1080 static void _sysfs_create(struct smmu_as *as, struct device *sysfs_parent);
1083 * Allocate resources for an AS
1084 * TODO: split into "alloc" and "lock"
/*
 * Allocate an address space (domain) for @client: find a free AS
 * (first asid with no hwclients, locked via trylock-style scan),
 * build its page directory, find the hardware-client group matching
 * the client's misc device name, program the PDIR registers and
 * claim each hardware client's ASID register. On T30 also maps a
 * dummy "page zero" for AVP vectors. Error paths (bottom) disable
 * any hardware clients already claimed. Returns &as->domain on
 * success / NULL on failure (return lines missing from excerpt).
 */
1086 static struct tegra_iovmm_domain *smmu_alloc_domain(
1087 struct tegra_iovmm_device *dev, struct tegra_iovmm_client *client)
1089 struct smmu_device *smmu =
1090 container_of(dev, struct smmu_device, iovmm_dev);
1091 struct smmu_as *as = NULL;
1092 const struct domain_hwc_map *map = NULL;
1095 /* Look for a free AS */
1096 for (asid = smmu->lowest_asid; asid < smmu->num_ases; asid++) {
1097 mutex_lock(&smmu->as[asid].lock);
1098 if (!smmu->as[asid].hwclients) {
1099 as = &smmu->as[asid];
1102 mutex_unlock(&smmu->as[asid].lock);
1106 pr_err(DRIVER_NAME ": no free AS\n");
1110 if (alloc_pdir(as) < 0)
1113 /* Look for a matching hardware client group */
1114 for (i = 0; i < ARRAY_SIZE(smmu_hwc_map); i++) {
1115 if (!strcmp(smmu_hwc_map[i].dev_name, client->misc_dev->name)) {
1116 map = &smmu_hwc_map[i];
1122 pr_err(DRIVER_NAME ": no SMMU resource for %s (%s)\n",
1123 client->name, client->misc_dev->name);
1127 spin_lock(&smmu->lock);
1128 /* Update PDIR register */
1129 writel(MC_SMMU_PTB_ASID_0_CURRENT_ASID(as->asid),
1130 as->smmu->regs_mc + MC_SMMU_PTB_ASID_0);
1131 writel(SMMU_MK_PDIR(as->pdir_page, as->pdir_attr),
1132 as->smmu->regs_mc + MC_SMMU_PTB_DATA_0);
1133 flush_smmu_regs(smmu);
1135 /* Put each hardware client in the group into the address space */
1136 for (i = 0; i < map->nr_hwcs; i++) {
1137 struct smmu_hwc_state *hwcst = &smmu->hwc_state[map->hwcs[i]];
1139 /* Is the hardware client busy? */
1140 if (hwcst->enable_disable != SMMU_ASID_DISABLE &&
1141 hwcst->enable_disable != SMMU_ASID_ENABLE(as->asid)) {
1143 ": HW 0x%lx busy for ASID %ld (client!=%s)\n",
1145 SMMU_ASID_ASID(hwcst->enable_disable),
1149 hwcst->enable_disable = SMMU_ASID_ENABLE(as->asid);
1150 writel(hwcst->enable_disable, smmu->regs_mc + hwcst->reg);
1152 flush_smmu_regs(smmu);
1153 spin_unlock(&smmu->lock);
1154 as->hwclients = map;
1155 _sysfs_create(as, client->misc_dev->this_device);
1156 mutex_unlock(&as->lock);
1158 #ifdef CONFIG_ARCH_TEGRA_3x_SOC
1159 /* Reserve "page zero" for AVP vectors using a common dummy page */
1160 smmu_map_pfn(&as->domain, NULL, 0,
1161 page_to_phys(as->smmu->avp_vector_page) >> SMMU_PAGE_SHIFT);
1166 /* Reset hardware clients that have been enabled */
1168 struct smmu_hwc_state *hwcst = &smmu->hwc_state[map->hwcs[i]];
1170 hwcst->enable_disable = SMMU_ASID_DISABLE;
1171 writel(hwcst->enable_disable, smmu->regs_mc + hwcst->reg);
1173 flush_smmu_regs(smmu);
1174 spin_unlock(&as->smmu->lock);
1178 mutex_unlock(&as->lock);
1184 * Release resources for an AS
1185 * TODO: split into "unlock" and "free"
/*
 * Release an AS: disable SMMU translation for every hardware client
 * in the group (writes SMMU_ASID_DISABLE under smmu->lock), clear
 * as->hwclients so the AS becomes allocatable again, reset the
 * hardware PDIR pointer for this ASID, and free the page directory
 * (free_pdir call missing from this excerpt).
 */
1187 static void smmu_free_domain(
1188 struct tegra_iovmm_domain *domain, struct tegra_iovmm_client *client)
1190 struct smmu_as *as = container_of(domain, struct smmu_as, domain);
1191 struct smmu_device *smmu = as->smmu;
1192 const struct domain_hwc_map *map = NULL;
1195 mutex_lock(&as->lock);
1196 map = as->hwclients;
1198 spin_lock(&smmu->lock);
1199 for (i = 0; i < map->nr_hwcs; i++) {
1200 struct smmu_hwc_state *hwcst = &smmu->hwc_state[map->hwcs[i]];
1202 hwcst->enable_disable = SMMU_ASID_DISABLE;
1203 writel(SMMU_ASID_DISABLE, smmu->regs_mc + hwcst->reg);
1205 flush_smmu_regs(smmu);
1206 spin_unlock(&smmu->lock);
1208 as->hwclients = NULL;
1209 if (as->pdir_page) {
1210 spin_lock(&smmu->lock);
1211 writel(MC_SMMU_PTB_ASID_0_CURRENT_ASID(as->asid),
1212 smmu->regs_mc + MC_SMMU_PTB_ASID_0);
1213 writel(MC_SMMU_PTB_DATA_0_RESET_VAL,
1214 smmu->regs_mc + MC_SMMU_PTB_DATA_0);
1215 flush_smmu_regs(smmu);
1216 spin_unlock(&smmu->lock);
1220 mutex_unlock(&as->lock);
/* IOVMM backend operations registered with the tegra_iovmm core
 * (.map entry missing from this excerpt). */
1223 static struct tegra_iovmm_device_ops tegra_iovmm_smmu_ops = {
1225 .unmap = smmu_unmap,
1226 .map_pfn = smmu_map_pfn,
1227 .alloc_domain = smmu_alloc_domain,
1228 .free_domain = smmu_free_domain,
1229 .suspend = smmu_suspend,
1230 .resume = smmu_resume,
/*
 * smmu_probe() - platform-driver probe: discover the SMMU aperture,
 * allocate and initialize the device and its address-space array,
 * register with the IOVMM core, and program the hardware.
 *
 * The tail of this function is the error-unwind path: it frees the AVP
 * vector page, per-ASID pdir pages, and unmaps all register apertures.
 */
1233 static int smmu_probe(struct platform_device *pdev)
1235 struct smmu_device *smmu = NULL;
1236 struct resource *window = NULL;
/* Compile-time sanity: kernel page size must match SMMU page size. */
1239 BUILD_BUG_ON(PAGE_SHIFT != SMMU_PAGE_SHIFT);
1240 BUILD_BUG_ON(ARRAY_SIZE(smmu_hwc_state_init) != HWC_COUNT);
1242 window = tegra_smmu_window(0);
1244 pr_err(DRIVER_NAME ": No SMMU resources\n");
1248 smmu = kzalloc(sizeof(*smmu), GFP_KERNEL);
1250 pr_err(DRIVER_NAME ": failed to allocate smmu_device\n");
/* Derive the IOVA window and page count from the platform resource. */
1254 smmu->num_ases = MC_SMMU_NUM_ASIDS;
1255 smmu->iovmm_base = (tegra_iovmm_addr_t)window->start;
1256 smmu->page_count = (window->end + 1 - window->start) >> SMMU_PAGE_SHIFT;
/* Map each register aperture that has a base address configured. */
1257 for (i = _MC; i < _REGS; i++) {
1258 if (tegra_reg[i].base != 0)
1259 smmu->regs[i] = ioremap(tegra_reg[i].base,
/* Default: translation enabled for all clients, no ASID security. */
1263 smmu->translation_enable_0_0 = ~0;
1264 smmu->translation_enable_1_0 = ~0;
1265 smmu->translation_enable_2_0 = ~0;
1266 smmu->asid_security_0 = 0;
1268 memcpy(smmu->hwc_state, smmu_hwc_state_init, sizeof(smmu->hwc_state));
1270 smmu->iovmm_dev.name = VMM_NAME;
1271 smmu->iovmm_dev.ops = &tegra_iovmm_smmu_ops;
1272 smmu->iovmm_dev.pgsize_bits = SMMU_PAGE_SHIFT;
1274 e = tegra_iovmm_register(&smmu->iovmm_dev);
1278 smmu->as = kzalloc(sizeof(smmu->as[0]) * smmu->num_ases, GFP_KERNEL);
1280 pr_err(DRIVER_NAME ": failed to allocate smmu_as\n");
1285 /* Initialize address space structure array */
1286 for (asid = 0; asid < smmu->num_ases; asid++) {
1287 struct smmu_as *as = &smmu->as[asid];
1291 as->pdir_attr = _PDIR_ATTR;
1292 as->pde_attr = _PDE_ATTR;
1293 as->pte_attr = _PTE_ATTR;
1295 mutex_init(&as->lock);
1297 e = tegra_iovmm_domain_init(&as->domain, &smmu->iovmm_dev,
1300 (smmu->page_count << SMMU_PAGE_SHIFT));
1304 spin_lock_init(&smmu->lock);
1305 smmu_setup_regs(smmu);
1307 smmu->dev = &pdev->dev;
1308 platform_set_drvdata(pdev, smmu);
/* Tegra3: dummy page backing "page zero" for AVP exception vectors. */
1310 #ifdef CONFIG_ARCH_TEGRA_3x_SOC
1311 smmu->avp_vector_page = alloc_page(GFP_KERNEL);
1312 if (!smmu->avp_vector_page)
1315 smmu_debugfs_create(smmu);
/* ---- error-unwind path below (label not visible in this excerpt) ---- */
1320 #ifdef CONFIG_ARCH_TEGRA_3x_SOC
1321 if (smmu->avp_vector_page)
1322 __free_page(smmu->avp_vector_page);
1324 if (smmu && smmu->as) {
1325 for (asid = 0; asid < smmu->num_ases; asid++) {
1326 if (smmu->as[asid].pdir_page) {
1327 ClearPageReserved(smmu->as[asid].pdir_page);
1328 __free_page(smmu->as[asid].pdir_page);
1333 for (i = 0; i < _REGS; i++) {
1334 if (smmu->regs[i]) {
1335 iounmap(smmu->regs[i]);
1336 smmu->regs[i] = NULL;
/* Platform driver descriptor; matched by name against the SMMU device. */
1343 static struct platform_driver tegra_iovmm_smmu_drv = {
1344 .probe = smmu_probe,
1345 .remove = smmu_remove,
1347 .name = DRIVER_NAME,
/* Module init: register the SMMU platform driver (see subsys_initcall). */
1351 static int __devinit smmu_init(void)
1353 return platform_driver_register(&tegra_iovmm_smmu_drv);
/* Module exit: unregister the platform driver. */
1356 static void __exit smmu_exit(void)
1358 platform_driver_unregister(&tegra_iovmm_smmu_drv);
/* Initialize early (subsys level) so client drivers find the IOVMM. */
1361 subsys_initcall(smmu_init);
1362 module_exit(smmu_exit);
1365 * SMMU-global sysfs interface for debugging
/* Forward declarations for the per-register sysfs show/store handlers. */
1367 static ssize_t _sysfs_show_reg(struct device *d,
1368 struct device_attribute *da, char *buf);
1369 static ssize_t _sysfs_store_reg(struct device *d,
1370 struct device_attribute *da, const char *buf,
/*
 * _NAME_MAP_SUFFIX() - build one _smmu_reg_name_map entry for register
 * <_name> within register aperture <base>, appending <suffix> to the
 * sysfs attribute name (used to distinguish per-MC stats registers).
 * The attribute is readable by all, writable by owner (S_IRUGO|S_IWUSR).
 * _NAME_MAP() is the common no-suffix form.
 */
1373 #define _NAME_MAP_SUFFIX(_name, base, suffix) { \
1374 .name = __stringify(_name) suffix, \
1375 .offset = _name##_0, \
1376 .regbase = (base), \
1377 .dev_attr = __ATTR(_name, S_IRUGO | S_IWUSR, \
1378 _sysfs_show_reg, _sysfs_store_reg) \
1380 #define _NAME_MAP(_name, base) _NAME_MAP_SUFFIX(_name, base, "")
/*
 * Table of MMIO registers exported through sysfs, one attribute each.
 * Each entry pairs a register name/offset with its aperture index
 * (regbase) and a device_attribute wired to _sysfs_show_reg/_store_reg.
 */
1383 struct _reg_name_map {
1387 struct device_attribute dev_attr;
1388 } _smmu_reg_name_map[] = {
/* Memory-controller status/error registers. */
1389 _NAME_MAP(MC_INTSTATUS, _MC),
1390 _NAME_MAP(MC_ERR_STATUS, _MC),
1391 _NAME_MAP(MC_ERR_ADR, _MC),
/* Core SMMU configuration and TLB/PTC control. */
1393 _NAME_MAP(MC_SMMU_CONFIG, _MC),
1394 _NAME_MAP(MC_SMMU_TLB_CONFIG, _MC),
1395 _NAME_MAP(MC_SMMU_PTC_CONFIG, _MC),
1396 _NAME_MAP(MC_SMMU_PTB_ASID, _MC),
1397 _NAME_MAP(MC_SMMU_PTB_DATA, _MC),
1398 _NAME_MAP(MC_SMMU_TLB_FLUSH, _MC),
1399 _NAME_MAP(MC_SMMU_PTC_FLUSH, _MC),
1400 _NAME_MAP(MC_SMMU_ASID_SECURITY, _MC),
1401 _NAME_MAP(MC_EMEM_CFG, _MC),
1402 _NAME_MAP(MC_SECURITY_CFG0, _MC),
1403 _NAME_MAP(MC_SECURITY_CFG1, _MC),
1404 _NAME_MAP(MC_SECURITY_CFG2, _MC),
1405 _NAME_MAP(MC_SECURITY_RSV, _MC),
/* Aggregate TLB/PTC hit/miss counters. */
1406 _NAME_MAP(MC_SMMU_STATS_TLB_HIT_COUNT, _MC),
1407 _NAME_MAP(MC_SMMU_STATS_TLB_MISS_COUNT, _MC),
1408 _NAME_MAP(MC_SMMU_STATS_PTC_HIT_COUNT, _MC),
1409 _NAME_MAP(MC_SMMU_STATS_PTC_MISS_COUNT, _MC),
/* Per-memory-controller stats, exported as "<name>.0"/"<name>.1"
 * when the SoC has split MC0/MC1 apertures. */
1410 #ifdef TEGRA_MC0_BASE
1411 _NAME_MAP_SUFFIX(MC_SMMU_STATS_TLB_HIT_COUNT, _MC0, ".0"),
1412 _NAME_MAP_SUFFIX(MC_SMMU_STATS_TLB_MISS_COUNT, _MC0, ".0"),
1413 _NAME_MAP_SUFFIX(MC_SMMU_STATS_PTC_HIT_COUNT, _MC0, ".0"),
1414 _NAME_MAP_SUFFIX(MC_SMMU_STATS_PTC_MISS_COUNT, _MC0, ".0"),
1416 #ifdef TEGRA_MC1_BASE
1417 _NAME_MAP_SUFFIX(MC_SMMU_STATS_TLB_HIT_COUNT, _MC1, ".1"),
1418 _NAME_MAP_SUFFIX(MC_SMMU_STATS_TLB_MISS_COUNT, _MC1, ".1"),
1419 _NAME_MAP_SUFFIX(MC_SMMU_STATS_PTC_HIT_COUNT, _MC1, ".1"),
1420 _NAME_MAP_SUFFIX(MC_SMMU_STATS_PTC_MISS_COUNT, _MC1, ".1"),
1422 _NAME_MAP(MC_SMMU_TRANSLATION_ENABLE_0, _MC),
1423 _NAME_MAP(MC_SMMU_TRANSLATION_ENABLE_1, _MC),
1424 _NAME_MAP(MC_SMMU_TRANSLATION_ENABLE_2, _MC),
/* EMC statistics/filter registers. */
1426 _NAME_MAP(MC_STAT_CONTROL, _MC),
1427 _NAME_MAP(MC_STAT_EMC_CLOCKS, _MC),
1428 _NAME_MAP(MC_STAT_EMC_FILTER_SET0_ADR_LIMIT_LO, _MC),
1429 _NAME_MAP(MC_STAT_EMC_FILTER_SET0_ADR_LIMIT_HI, _MC),
1430 _NAME_MAP(MC_STAT_EMC_FILTER_SET0_CLIENT_0, _MC),
1431 _NAME_MAP(MC_STAT_EMC_FILTER_SET0_CLIENT_1, _MC),
1432 _NAME_MAP(MC_STAT_EMC_FILTER_SET0_CLIENT_2, _MC),
1433 _NAME_MAP(MC_STAT_EMC_SET0_COUNT, _MC),
1434 _NAME_MAP(MC_STAT_EMC_SET0_COUNT_MSBS, _MC),
1435 _NAME_MAP(MC_STAT_EMC_FILTER_SET1_ADR_LIMIT_LO, _MC),
1436 _NAME_MAP(MC_STAT_EMC_FILTER_SET1_ADR_LIMIT_HI, _MC),
1437 _NAME_MAP(MC_STAT_EMC_FILTER_SET1_CLIENT_0, _MC),
1438 _NAME_MAP(MC_STAT_EMC_FILTER_SET1_CLIENT_1, _MC),
1439 _NAME_MAP(MC_STAT_EMC_FILTER_SET1_CLIENT_2, _MC),
1440 _NAME_MAP(MC_STAT_EMC_SET1_COUNT, _MC),
1441 _NAME_MAP(MC_STAT_EMC_SET1_COUNT_MSBS, _MC),
1442 _NAME_MAP(MC_STAT_EMC_FILTER_SET0_VIRTUAL_ADR_LIMIT_LO, _MC),
1443 _NAME_MAP(MC_STAT_EMC_FILTER_SET0_VIRTUAL_ADR_LIMIT_HI, _MC),
1444 _NAME_MAP(MC_STAT_EMC_FILTER_SET0_ASID, _MC),
1445 _NAME_MAP(MC_STAT_EMC_FILTER_SET1_VIRTUAL_ADR_LIMIT_LO, _MC),
1446 _NAME_MAP(MC_STAT_EMC_FILTER_SET1_VIRTUAL_ADR_LIMIT_HI, _MC),
1447 _NAME_MAP(MC_STAT_EMC_FILTER_SET1_ASID, _MC),
/* Expand one entry per SMMU client ASID register; the client list
 * presumably comes from an x-macro include not visible here — TODO
 * confirm where op() is invoked/undefined. */
1448 #define op(c) _NAME_MAP(MC_SMMU_##c##_ASID, _MC),
1451 _NAME_MAP(AHB_ARBITRATION_XBAR_CTRL, _AHBARB),
1452 #ifdef AHB_MASTER_SWID_0
1453 _NAME_MAP(AHB_MASTER_SWID, _AHBARB),
1455 #ifdef APBDMA_CHANNEL_SWID_0
1456 _NAME_MAP(APBDMA_CHANNEL_SWID, _APBDMA),
/*
 * lookup_reg() - map a sysfs device_attribute back to its register
 * table entry by name.  Returns the matching entry; presumably NULL
 * when no name matches (the fall-through return is not visible here).
 */
1460 static struct _reg_name_map *lookup_reg(struct device_attribute *da)
1463 for (i = 0; i < ARRAY_SIZE(_smmu_reg_name_map); i++) {
1464 if (!strcmp(_smmu_reg_name_map[i].name, da->attr.name))
1465 return &_smmu_reg_name_map[i];
/*
 * _sysfs_show_reg() - sysfs "show": print a register's current value
 * and its physical address as "XXXXXXXX @XXXXXXXX".
 */
1470 static ssize_t _sysfs_show_reg(struct device *d,
1471 struct device_attribute *da, char *buf)
1473 struct smmu_device *smmu =
1474 container_of(d, struct smmu_device, sysfs_dev);
1475 struct _reg_name_map *reg = lookup_reg(da);
/* Read through the mapped aperture; report the physical address too. */
1479 return sprintf(buf, "%08lx @%08lx\n",
1480 (unsigned long)readl(smmu->regs[reg->regbase] + reg->offset),
1481 tegra_reg[reg->regbase].base + reg->offset);
/*
 * good_challenge() - one-shot gate for protected sysfs writes.
 * With CONFIG_TEGRA_IOVMM_SMMU_SYSFS the check always passes (the macro
 * just clears challenge_pid and yields 1); otherwise the write is
 * permitted only when the caller's pid matches the stored challenge_pid.
 * In both variants the stored pid is consumed (reset to 0).
 * NOTE(review): the macro uses parameter 'smmu' unparenthesized;
 * ((smmu)->challenge_pid) would be more hygienic — confirm all callers
 * pass a simple identifier.
 */
1484 #ifdef CONFIG_TEGRA_IOVMM_SMMU_SYSFS
1485 #define good_challenge(smmu) ((smmu->challenge_pid = 0), 1)
1487 static inline int good_challenge(struct smmu_device *smmu)
1489 int ok = (smmu->challenge_pid == current->pid);
1490 smmu->challenge_pid = 0;
/*
 * _sysfs_store_reg() - sysfs "store": write a hex value to a register.
 * A caller who passed the challenge may write any register; otherwise
 * only whitelisted stats-control bits of certain _MC registers may be
 * changed (the new value must differ from the current one only within
 * the allowed mask).
 */
1495 static ssize_t _sysfs_store_reg(struct device *d,
1496 struct device_attribute *da,
1497 const char *buf, size_t count)
1499 struct smmu_device *smmu =
1500 container_of(d, struct smmu_device, sysfs_dev);
1501 struct _reg_name_map *reg = lookup_reg(da);
1502 unsigned long value;
/* Parse the user-supplied hex value. */
1506 if (kstrtoul(buf, 16, &value))
1508 if (good_challenge(smmu))
1509 writel(value, smmu->regs[reg->regbase] + reg->offset);
1510 else if (reg->regbase == _MC) {
/* Unprivileged path: only stats enable/test bits are writable. */
1511 unsigned long mask = 0;
1512 switch (reg->offset) {
1513 case MC_SMMU_TLB_CONFIG_0:
1514 mask = MC_SMMU_TLB_CONFIG_0_TLB_STATS_ENABLE__MASK |
1515 MC_SMMU_TLB_CONFIG_0_TLB_STATS_TEST__MASK;
1517 case MC_SMMU_PTC_CONFIG_0:
1518 mask = MC_SMMU_PTC_CONFIG_0_PTC_STATS_ENABLE__MASK |
1519 MC_SMMU_PTC_CONFIG_0_PTC_STATS_TEST__MASK;
/* Accept only if the change is confined to the masked bits. */
1526 unsigned long currval =
1527 (unsigned long)readl(smmu->regs[reg->regbase] +
1532 writel(value, smmu->regs[reg->regbase] + reg->offset);
/*
 * _sysfs_show_smmu() - dump the SMMU device state: register aperture
 * mappings, IOVMM window, ASID count, and per-ASID pdir pages.
 * NOTE(review): output is built with unbounded sprintf into the sysfs
 * buffer (PAGE_SIZE); with many ASIDs the per-ASID loop could overflow
 * it — consider scnprintf with a remaining-length bound.
 */
1538 static ssize_t _sysfs_show_smmu(struct device *d,
1539 struct device_attribute *da, char *buf)
1541 struct smmu_device *smmu =
1542 container_of(d, struct smmu_device, sysfs_dev);
1546 rv += sprintf(buf + rv , " regs_mc: %p @%8lx\n",
1547 smmu->regs_mc, tegra_reg[_MC].base);
1548 #ifdef TEGRA_MC0_BASE
1549 rv += sprintf(buf + rv , " regs_mc0: %p @%8lx\n",
1550 smmu->regs_mc0, tegra_reg[_MC0].base);
1552 #ifdef TEGRA_MC1_BASE
1553 rv += sprintf(buf + rv , " regs_mc1: %p @%8lx\n",
1554 smmu->regs_mc1, tegra_reg[_MC1].base);
1556 rv += sprintf(buf + rv , "regs_ahbarb: %p @%8lx\n",
1557 smmu->regs_ahbarb, tegra_reg[_AHBARB].base);
1558 rv += sprintf(buf + rv , "regs_apbdma: %p @%8lx\n",
1559 smmu->regs_apbdma, tegra_reg[_APBDMA].base);
1560 rv += sprintf(buf + rv , " iovmm_base: %p\n", (void *)smmu->iovmm_base);
1561 rv += sprintf(buf + rv , " page_count: %8lx\n", smmu->page_count);
1562 rv += sprintf(buf + rv , " num_ases: %d\n", smmu->num_ases);
1563 rv += sprintf(buf + rv , " as: %p\n", smmu->as);
/* One line per ASID, with its pdir page (and phys addr when present). */
1564 for (asid = 0; asid < smmu->num_ases; asid++) {
1566 sprintf(buf + rv , " ----- asid: %d\n", smmu->as[asid].asid);
1568 sprintf(buf + rv , " pdir_page: %p", smmu->as[asid].pdir_page);
1569 if (smmu->as[asid].pdir_page)
1571 sprintf(buf + rv , " @%8lx\n",
1572 (unsigned long)page_to_phys(smmu->as[asid].pdir_page));
1574 rv += sprintf(buf + rv , "\n");
1576 rv += sprintf(buf + rv , " enable: %s\n",
1577 smmu->enable ? "yes" : "no");
/* Read-only "show_smmu" attribute exposing the state dump above. */
1581 static struct device_attribute _attr_show_smmu
1582 = __ATTR(show_smmu, S_IRUGO, _sysfs_show_smmu, NULL);
/*
 * _SYSFS_SHOW_VALUE() - generate a sysfs "show" handler that prints
 * smmu-><field> with the given printf format.
 */
1584 #define _SYSFS_SHOW_VALUE(name, field, fmt) \
1585 static ssize_t _sysfs_show_##name(struct device *d, \
1586 struct device_attribute *da, char *buf) \
1588 struct smmu_device *smmu = \
1589 container_of(d, struct smmu_device, sysfs_dev); \
1590 return sprintf(buf, fmt "\n", smmu->field); \
1593 static void (*_sysfs_null_callback)(struct smmu_device *, unsigned long *) =
/*
 * _SYSFS_SET_VALUE_DO() - generate a sysfs "store" handler that parses
 * the input in the given numeric base, range-checks it against [0, ceil),
 * stores it into smmu-><field>, invokes <callback> on the stored value,
 * and consumes the challenge pid.  <challenge> is the admission check
 * expression evaluated in the handler's scope.
 * NOTE(review): 'value' is unsigned long, so '0 <= value' is always
 * true — harmless, but the lower-bound test is dead code.
 */
1596 #define _SYSFS_SET_VALUE_DO(name, field, base, ceil, callback, challenge) \
1597 static ssize_t _sysfs_set_##name(struct device *d, \
1598 struct device_attribute *da, const char *buf, size_t count) \
1600 unsigned long value; \
1601 struct smmu_device *smmu = \
1602 container_of(d, struct smmu_device, sysfs_dev); \
1603 if (kstrtoul(buf, base, &value)) \
1605 if (challenge && 0 <= value && value < ceil) { \
1606 smmu->field = value; \
1608 callback(smmu, &smmu->field); \
1610 smmu->challenge_pid = 0; \
/*
 * _SYSFS_SET_VALUE() - config-dependent admission policy: with
 * CONFIG_TEGRA_IOVMM_SMMU_SYSFS every write is allowed (challenge == 1);
 * otherwise the writer's pid must match the stored challenge_pid.
 */
1613 #ifdef CONFIG_TEGRA_IOVMM_SMMU_SYSFS
1614 #define _SYSFS_SET_VALUE(name, field, base, ceil, callback) \
1615 _SYSFS_SET_VALUE_DO(name, field, base, ceil, callback, 1)
1617 #define _SYSFS_SET_VALUE(name, field, base, ceil, callback) \
1618 _SYSFS_SET_VALUE_DO(name, field, base, ceil, callback, \
1619 (smmu->challenge_pid == current->pid))
/*
 * Instantiate show/set handlers for the tunable fields:
 * lowest_asid and debug_asid are decimal, bounded by the ASID count;
 * signature_pid is decimal, bounded by PID_MAX_LIMIT, and (per the
 * trailing '1') writable without passing the challenge.
 */
1622 _SYSFS_SHOW_VALUE(lowest_asid, lowest_asid, "%lu")
1623 _SYSFS_SET_VALUE(lowest_asid, lowest_asid, 10,
1624 MC_SMMU_NUM_ASIDS, _sysfs_null_callback)
1625 _SYSFS_SHOW_VALUE(debug_asid, debug_asid, "%lu")
1626 _SYSFS_SET_VALUE(debug_asid, debug_asid, 10,
1627 MC_SMMU_NUM_ASIDS, _sysfs_null_callback)
1628 _SYSFS_SHOW_VALUE(signature_pid, signature_pid, "%lu")
1629 _SYSFS_SET_VALUE_DO(signature_pid, signature_pid, 10, PID_MAX_LIMIT+1,
1630 _sysfs_null_callback, 1)
1633 * Protection for sysfs entries from accidental writing
1634 * Reading /sys/devices/smmu/challenge_code returns a random number.
1635 * The process writes back pid^challenge_code to /sys/devices/smmu/challenge_code.
1636 * If the stored result matches its pid, the process is allowed to alter a protected entry.
1637 * The challenge code is reset after each read and each use.
/*
 * _sysfs_show_challenge_code() - hand out a fresh random challenge and
 * invalidate any previously granted permission (challenge_pid = 0).
 */
1639 static ssize_t _sysfs_show_challenge_code(struct device *d,
1640 struct device_attribute *da, char *buf)
1642 struct smmu_device *smmu =
1643 container_of(d, struct smmu_device, sysfs_dev);
1644 smmu->challenge_pid = 0;
1645 smmu->challenge_code = random32();
1646 return sprintf(buf, "%lx\n", smmu->challenge_code);
/*
 * _sysfs_set_challenge_code() - accept the response to a challenge.
 * The writer supplies (pid ^ challenge_code); XORing with the stored
 * code recovers the pid, which good_challenge() later compares against
 * current->pid.  A new random code is generated either way.
 */
1649 static ssize_t _sysfs_set_challenge_code(struct device *d,
1650 struct device_attribute *da, const char *buf, size_t count)
1652 struct smmu_device *smmu =
1653 container_of(d, struct smmu_device, sysfs_dev);
1654 unsigned long value;
1655 if (!kstrtoul(buf, 16, &value)) {
1656 smmu->challenge_pid = smmu->challenge_code ^ value;
1657 smmu->challenge_code = random32();
1663 * "echo 's d' > /sys/devices/smmu/copy_pdir" copies ASID s's pdir pointer
1664 * to ASID d. -1 as s resets d's pdir to null.
/*
 * _sysfs_copy_pdir() - debugging hook: copy ASID <fr>'s page-directory
 * pointer to ASID <to>; fr == -1 resets <to>'s pdir.  Requires a valid
 * challenge.  Input format: two hex numbers "s d".
 * NOTE(review): kstrtol() fails on strings with trailing characters, so
 * parsing "s d" with two kstrtol calls plus manual cursor advance looks
 * suspect — confirm this wasn't meant to be simple_strtol()-style
 * parsing that stops at the first non-digit.
 */
1666 static ssize_t _sysfs_copy_pdir(struct device *d,
1667 struct device_attribute *da, const char *buf, size_t count)
1669 struct smmu_device *smmu =
1670 container_of(d, struct smmu_device, sysfs_dev);
1673 if (kstrtol(buf, 16, &fr))
/* Advance past the first number and intervening whitespace. */
1675 while (isxdigit(*buf))
1677 while (isspace(*buf))
1679 if (kstrtol(buf, 16, &to))
/* Validate: distinct ASIDs, both within range (fr may be -1). */
1682 if (good_challenge(smmu) && fr != to &&
1683 fr < smmu->num_ases && 0 <= to && to < smmu->num_ases) {
1684 spin_lock(&smmu->lock);
1685 writel(MC_SMMU_PTB_ASID_0_CURRENT_ASID(to),
1686 smmu->regs_mc + MC_SMMU_PTB_ASID_0);
1688 ? SMMU_MK_PDIR(smmu->as[fr].pdir_page, smmu->as[fr].pdir_attr)
1689 : MC_SMMU_PTB_DATA_0_RESET_VAL,
1690 smmu->regs_mc + MC_SMMU_PTB_DATA_0);
1691 smmu->as[to].pdir_page = (fr >= 0) ? smmu->as[fr].pdir_page : 0;
1692 spin_unlock(&smmu->lock);
/* Clamp a stored attribute value to the legal attribute bits. */
1697 static void _sysfs_mask_attr(struct smmu_device *smmu, unsigned long *field)
1699 *field &= _MASK_ATTR;
/*
 * _sysfs_mask_pdir_attr() - after masking the stored value, rewrite the
 * debug ASID's live PTB_DATA register so its attribute bits match:
 * read the current pdir word, clear the attribute bits, and write it
 * back (the new bits are presumably OR-ed in on the elided line 1713 —
 * TODO confirm).  Runs under smmu->lock; flushed afterwards.
 */
1702 static void _sysfs_mask_pdir_attr(struct smmu_device *smmu,
1703 unsigned long *field)
1707 _sysfs_mask_attr(smmu, field);
1708 spin_lock(&smmu->lock);
1709 writel(MC_SMMU_PTB_ASID_0_CURRENT_ASID(smmu->debug_asid),
1710 smmu->regs_mc + MC_SMMU_PTB_ASID_0);
1711 pdir = readl(smmu->regs_mc + MC_SMMU_PTB_DATA_0);
1712 pdir &= ~_MASK_ATTR;
1714 writel(pdir, smmu->regs_mc + MC_SMMU_PTB_DATA_0);
1715 spin_unlock(&smmu->lock);
1716 flush_smmu_regs(smmu);
/* Function-pointer aliases so the _SYSFS_SET_VALUE macros can take a
 * callback argument uniformly. */
1719 static void (*_sysfs_mask_attr_callback)(struct smmu_device *,
1720 unsigned long *field) = &_sysfs_mask_attr;
1721 static void (*_sysfs_mask_pdir_attr_callback)(struct smmu_device *,
1722 unsigned long *field) = &_sysfs_mask_pdir_attr;
/*
 * Hex-valued attribute tunables for the debug ASID's pdir/pde/pte
 * attribute bits; setters mask to legal bits, and pdir_attr also
 * updates the live PTB_DATA register via its callback.
 */
1724 _SYSFS_SHOW_VALUE(pdir_attr, as[smmu->debug_asid].pdir_attr, "%lx")
1725 _SYSFS_SET_VALUE(pdir_attr, as[smmu->debug_asid].pdir_attr, 16,
1726 _PDIR_ATTR + 1, _sysfs_mask_pdir_attr_callback)
1727 _SYSFS_SHOW_VALUE(pde_attr, as[smmu->debug_asid].pde_attr, "%lx")
1728 _SYSFS_SET_VALUE(pde_attr, as[smmu->debug_asid].pde_attr, 16,
1729 _PDE_ATTR + 1, _sysfs_mask_attr_callback)
1730 _SYSFS_SHOW_VALUE(pte_attr, as[smmu->debug_asid].pte_attr, "%lx")
1731 _SYSFS_SET_VALUE(pte_attr, as[smmu->debug_asid].pte_attr, 16,
1732 _PTE_ATTR + 1, _sysfs_mask_attr_callback)
/* Non-register sysfs attributes: tunables, challenge protocol, and the
 * pdir-copy debugging hook (write-only). */
1734 static struct device_attribute _attr_values[] = {
1735 __ATTR(lowest_asid, S_IRUGO | S_IWUSR,
1736 _sysfs_show_lowest_asid, _sysfs_set_lowest_asid),
1737 __ATTR(debug_asid, S_IRUGO | S_IWUSR,
1738 _sysfs_show_debug_asid, _sysfs_set_debug_asid),
1739 __ATTR(signature_pid, S_IRUGO | S_IWUSR,
1740 _sysfs_show_signature_pid, _sysfs_set_signature_pid),
1741 __ATTR(challenge_code, S_IRUGO | S_IWUSR,
1742 _sysfs_show_challenge_code, _sysfs_set_challenge_code),
1743 __ATTR(copy_pdir, S_IWUSR, NULL, _sysfs_copy_pdir),
1745 __ATTR(pdir_attr, S_IRUGO | S_IWUSR,
1746 _sysfs_show_pdir_attr, _sysfs_set_pdir_attr),
1747 __ATTR(pde_attr, S_IRUGO | S_IWUSR,
1748 _sysfs_show_pde_attr, _sysfs_set_pde_attr),
1749 __ATTR(pte_attr, S_IRUGO | S_IWUSR,
1750 _sysfs_show_pte_attr, _sysfs_set_pte_attr),
/* Flat attribute array filled in by _sysfs_smmu(): register attrs +
 * value attrs + show_smmu + NULL terminator (the +3 leaves slack —
 * presumably for show_smmu, the terminator, and one spare). */
1753 static struct attribute *_smmu_attrs[
1754 ARRAY_SIZE(_smmu_reg_name_map) + ARRAY_SIZE(_attr_values) + 3];
1755 static struct attribute_group _smmu_attr_group = {
1756 .attrs = _smmu_attrs
/*
 * _sysfs_smmu() - lazily create the "smmu" sysfs device and its
 * attribute group under <parent>.  A use-count guard makes repeated
 * calls (one per AS/client) a no-op after the first.  On registration
 * or group-creation failure the use count is rolled back.
 */
1759 static void _sysfs_smmu(struct smmu_device *smmu, struct device *parent)
1763 if (smmu->sysfs_use_count++ > 0)
/* Populate the flat attribute array: register attrs first... */
1765 for (i = 0; i < ARRAY_SIZE(_smmu_reg_name_map); i++) {
1766 attr_name(_smmu_reg_name_map[i].dev_attr) =
1767 _smmu_reg_name_map[i].name;
1768 _smmu_attrs[i] = &_smmu_reg_name_map[i].dev_attr.attr;
/* ...then the value attrs, show_smmu, and the NULL terminator. */
1770 for (j = 0; j < ARRAY_SIZE(_attr_values); j++)
1771 _smmu_attrs[i++] = &_attr_values[j].attr;
1772 _smmu_attrs[i++] = &_attr_show_smmu.attr;
1773 _smmu_attrs[i] = NULL;
1775 dev_set_name(&smmu->sysfs_dev, "smmu");
1776 smmu->sysfs_dev.parent = parent;
1777 smmu->sysfs_dev.driver = NULL;
1778 smmu->sysfs_dev.release = NULL;
1779 if (device_register(&smmu->sysfs_dev)) {
1780 pr_err("%s: failed to register smmu_sysfs_dev\n", __func__);
1781 smmu->sysfs_use_count--;
1784 if (sysfs_create_group(&smmu->sysfs_dev.kobj, &_smmu_attr_group)) {
1785 pr_err("%s: failed to create group for smmu_sysfs_dev\n",
1787 smmu->sysfs_use_count--;
/* Per-AS entry point: delegate to the device-wide sysfs setup. */
1792 static void _sysfs_create(struct smmu_as *as, struct device *parent)
1794 _sysfs_smmu(as->smmu, parent);