[ARM/tegra] Add Tegra3 support
/*
 * arch/arm/mach-tegra/iovmm-smmu.c
 *
 * Tegra I/O VMM implementation for SMMU devices for Tegra 3 series
 * systems-on-a-chip.
 *
 * Copyright (c) 2010, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/sysfs.h>
#include <linux/slab.h>

#include <asm/io.h>
#include <asm/page.h>
#include <asm/cacheflush.h>

#include <mach/iovmm.h>
#include <mach/iomap.h>

// For debugging
//#define HIT_MISS_STAT
//#define SMMU_SYSFS

#if defined(CONFIG_ARCH_TEGRA_3x_SOC)
//
// ALL-CAP macros have been copied from t30/armc.h
//
#define MC_SMMU_CONFIG_0                                0x10
#define MC_SMMU_CONFIG_0_SMMU_ENABLE_DISABLE            0
#define MC_SMMU_CONFIG_0_SMMU_ENABLE_ENABLE             1

#define MC_SMMU_TLB_CONFIG_0                            0x14
#define MC_SMMU_TLB_CONFIG_0_TLB_STATS_enable           (1<<31)
#define MC_SMMU_TLB_CONFIG_0_TLB_HIT_UNDER_MISS_enable  (1<<29)
#define MC_SMMU_TLB_CONFIG_0_TLB_ACTIVE_LINES_value     0x10
#define MC_SMMU_TLB_CONFIG_0_RESET_VAL                  0x20000010

#define MC_SMMU_PTC_CONFIG_0                            0x18
#define MC_SMMU_PTC_CONFIG_0_PTC_STATS_enable           (1<<31)
#define MC_SMMU_PTC_CONFIG_0_PTC_CACHE_enable           (1<<29)
#define MC_SMMU_PTC_CONFIG_0_PTC_INDEX_MAP_pattern      0x3f
#define MC_SMMU_PTC_CONFIG_0_RESET_VAL                  0x2000003f

#define MC_SMMU_PTB_ASID_0                              0x1c
#define MC_SMMU_PTB_ASID_0_CURRENT_ASID_SHIFT           0

#define MC_SMMU_PTB_DATA_0                              0x20
#define MC_SMMU_PTB_DATA_0_RESET_VAL                    0
#define MC_SMMU_PTB_DATA_0_ASID_NONSECURE_SHIFT         29
#define MC_SMMU_PTB_DATA_0_ASID_WRITABLE_SHIFT          30
#define MC_SMMU_PTB_DATA_0_ASID_READABLE_SHIFT          31

#define MC_SMMU_TLB_FLUSH_0                             0x30
#define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_MATCH_ALL              0
#define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_MATCH_SECTION          2
#define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_MATCH_GROUP            3
#define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_SHIFT                29
#define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_DISABLE        0
#define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_ENABLE         1
#define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_SHIFT          31

#define MC_SMMU_PTC_FLUSH_0                             0x34
#define MC_SMMU_PTC_FLUSH_0_PTC_FLUSH_TYPE_ALL          0
#define MC_SMMU_PTC_FLUSH_0_PTC_FLUSH_TYPE_ADR          1
#define MC_SMMU_PTC_FLUSH_0_PTC_FLUSH_ADR_SHIFT         4

#define MC_SMMU_ASID_SECURITY_0                         0x38

#define MC_SMMU_STATS_TLB_HIT_COUNT_0                   0x1f0
#define MC_SMMU_STATS_TLB_MISS_COUNT_0                  0x1f4
#define MC_SMMU_STATS_PTC_HIT_COUNT_0                   0x1f8
#define MC_SMMU_STATS_PTC_MISS_COUNT_0                  0x1fc

#define MC_SMMU_TRANSLATION_ENABLE_0_0                  0x228
#define MC_SMMU_TRANSLATION_ENABLE_1_0                  0x22c
#define MC_SMMU_TRANSLATION_ENABLE_2_0                  0x230

#define MC_SMMU_AFI_ASID_0              0x238   // PCIE
#define MC_SMMU_AVPC_ASID_0             0x23c   // AVP
#define MC_SMMU_DC_ASID_0               0x240   // Display controller
#define MC_SMMU_DCB_ASID_0              0x244   // Display controller B
#define MC_SMMU_EPP_ASID_0              0x248   // Encoder pre-processor
#define MC_SMMU_G2_ASID_0               0x24c   // 2D engine
#define MC_SMMU_HC_ASID_0               0x250   // Host1x
#define MC_SMMU_HDA_ASID_0              0x254   // High-def audio
#define MC_SMMU_ISP_ASID_0              0x258   // Image signal processor
#define MC_SMMU_MPE_ASID_0              0x264   // MPEG encoder
#define MC_SMMU_NV_ASID_0               0x268   // (3D)
#define MC_SMMU_NV2_ASID_0              0x26c   // (3D)
#define MC_SMMU_PPCS_ASID_0             0x270   // AHB
#define MC_SMMU_SATA_ASID_0             0x278   // SATA
#define MC_SMMU_VDE_ASID_0              0x27c   // Video decoder
#define MC_SMMU_VI_ASID_0               0x280   // Video input

#define SMMU_PDE_NEXT_SHIFT             28
#endif

#define MC_SMMU_NUM_ASIDS       4
#define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_GROUP_mask             0xffffc000
#define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_GROUP_shift    12      // right shift
#define MC_SMMU_PTB_ASID_0_CURRENT_ASID(n)      \
                ((n) << MC_SMMU_PTB_ASID_0_CURRENT_ASID_SHIFT)
#define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_disable                \
                (MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_DISABLE <<    \
                        MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_SHIFT)
#define MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_enable                 \
                (MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_ENABLE <<     \
                        MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_SHIFT)

#define VMM_NAME "iovmm-smmu"
#define DRIVER_NAME "tegra_smmu"

#define SMMU_PAGE_SHIFT 12
#define SMMU_PAGE_SIZE          (1 << SMMU_PAGE_SHIFT)

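//
// Two-level page tables: a one-page directory of 1024 PDEs, each pointing
// to a one-page table of 1024 PTEs, each mapping one 4KB page.  An I/O
// virtual address therefore splits as
//      iova[31:22] = PDE index, iova[21:12] = PTE index,
//      iova[11:0]  = page offset,
// covering a 4GB I/O virtual space per ASID.
//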
typedef unsigned long smmu_pde_t;
typedef unsigned long smmu_pte_t;

#define SMMU_PDIR_COUNT 1024
#define SMMU_PDIR_SIZE  (sizeof(smmu_pde_t) * SMMU_PDIR_COUNT)
#define SMMU_PTBL_COUNT 1024
#define SMMU_PTBL_SIZE  (sizeof(smmu_pte_t) * SMMU_PTBL_COUNT)
#define SMMU_PDIR_SHIFT 12
#define SMMU_PDE_SHIFT  12
#define SMMU_PTE_SHIFT  12
#define SMMU_PFN_MASK   0x000fffff

#define SMMU_ADDR_TO_PFN(addr)  ((addr)>>12)
#define SMMU_ADDR_TO_PDN(addr)  ((addr)>>22)
#define SMMU_PDN_TO_ADDR(pdn)   ((pdn)<<22)

#define _READABLE       (1<<MC_SMMU_PTB_DATA_0_ASID_READABLE_SHIFT)
#define _WRITABLE       (1<<MC_SMMU_PTB_DATA_0_ASID_WRITABLE_SHIFT)
#define _NONSECURE      (1<<MC_SMMU_PTB_DATA_0_ASID_NONSECURE_SHIFT)
#define _PDE_NEXT       (1<<SMMU_PDE_NEXT_SHIFT)

#define _PDIR_ATTR      (_READABLE|_WRITABLE|_NONSECURE)

#define _PDE_ATTR       (_READABLE|_WRITABLE|_NONSECURE)
#define _PDE_ATTR_N     (_PDE_ATTR|_PDE_NEXT)
#define _PDE_VACANT(pdn)        (((pdn)<<10)|_PDE_ATTR)

#define _PTE_ATTR       (_READABLE|_WRITABLE|_NONSECURE)
#define _PTE_VACANT(addr)       (((addr)>>SMMU_PAGE_SHIFT)|_PTE_ATTR)

#define SMMU_MK_PDIR(page, attr)        \
                ((page_to_phys(page)>>SMMU_PDIR_SHIFT)|(attr))
#define SMMU_MK_PDE(page, attr)         \
                (smmu_pde_t)((page_to_phys(page)>>SMMU_PDE_SHIFT)|(attr))
#define SMMU_EX_PTBL_PAGE(pde)          \
                pfn_to_page((unsigned long)(pde) & SMMU_PFN_MASK)
#define SMMU_PFN_TO_PTE(pfn, attr)      (smmu_pte_t)((pfn)|(attr))

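//
// Per-client ASID register format: bit 31 enables SMMU translation for
// the client, the low bits select the ASID used for its transactions.
//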
#define SMMU_ASID_ENABLE(asid)  ((asid)|(1<<31))
#define SMMU_ASID_DISABLE       0
#define SMMU_ASID_ASID(n)       ((n)&~SMMU_ASID_ENABLE(0))

// Keep this as a "natural" enumeration (no assignments)
enum smmu_hwclient {
        HWC_AFI,
        HWC_AVPC,
        HWC_DC,
        HWC_DCB,
        HWC_EPP,
        HWC_G2,
        HWC_HC,
        HWC_HDA,
        HWC_ISP,
        HWC_MPE,
        HWC_NV,
        HWC_NV2,
        HWC_PPCS,
        HWC_SATA,
        HWC_VDE,
        HWC_VI,

        HWC_COUNT
};

struct smmu_hwc_state {
        unsigned long reg;
        unsigned long enable_disable;
};

// Hardware client mapping initializer
#define HWC_INIT(client)        \
        [HWC_##client] = {MC_SMMU_##client##_ASID_0, SMMU_ASID_DISABLE},

static const struct smmu_hwc_state smmu_hwc_state_init[] = {
        HWC_INIT(AFI)
        HWC_INIT(AVPC)
        HWC_INIT(DC)
        HWC_INIT(DCB)
        HWC_INIT(EPP)
        HWC_INIT(G2)
        HWC_INIT(HC)
        HWC_INIT(HDA)
        HWC_INIT(ISP)
        HWC_INIT(MPE)
        HWC_INIT(NV)
        HWC_INIT(NV2)
        HWC_INIT(PPCS)
        HWC_INIT(SATA)
        HWC_INIT(VDE)
        HWC_INIT(VI)
};

struct domain_hwc_map {
        const char *dev_name;
        const enum smmu_hwclient *hwcs;
        const unsigned int nr_hwcs;
};

// Enable all hardware clients for SMMU translation
static const enum smmu_hwclient nvmap_hwcs[] = {
        HWC_AFI,
        HWC_AVPC,
        HWC_DC,
        HWC_DCB,
        HWC_EPP,
        HWC_G2,
        HWC_HC,
        HWC_HDA,
        HWC_ISP,
        HWC_MPE,
        HWC_NV,
        HWC_NV2,
        HWC_PPCS,
        HWC_SATA,
        HWC_VDE,
        HWC_VI
};

static const struct domain_hwc_map smmu_hwc_map[] = {
        {
                .dev_name = "nvmap",
                .hwcs = nvmap_hwcs,
                .nr_hwcs = ARRAY_SIZE(nvmap_hwcs),
        },
};

//
// Per address space
//
struct smmu_as {
        struct smmu_device      *smmu;  /* back pointer to container */
        unsigned int            asid;
        const struct domain_hwc_map     *hwclients;
        struct semaphore        sem;
        struct tegra_iovmm_domain domain;
        bool            needs_barrier;  /* emulator WAR */
        struct page     *pdir_page;
        unsigned long   pte_attr;
        unsigned int    *pte_count;
        struct device   sysfs_dev;
        int             sysfs_use_count;
};

//
// Per SMMU device
//
struct smmu_device {
        void __iomem    *regs;
        tegra_iovmm_addr_t      iovmm_base;     /* remappable base address */
        unsigned long   page_count;             /* total remappable size */
        spinlock_t      lock;
        char            *name;
        struct tegra_iovmm_device iovmm_dev;
        int             num_ases;
        struct smmu_as  *as;                    /* Run-time allocated array */
        struct smmu_hwc_state   hwc_state[HWC_COUNT];
        struct device   sysfs_dev;
        int             sysfs_use_count;
        bool            enable;
        //
        // Saved register images for suspend/resume
        //
        unsigned long translation_enable_0_0;
        unsigned long translation_enable_1_0;
        unsigned long translation_enable_2_0;
        unsigned long asid_security_0;

        int lowest_asid;        // Variable for hardware testing
};

#define VA_PAGE_TO_PA(va, page) \
        (page_to_phys(page) + ((unsigned long)(va) & ~PAGE_MASK))

#define FLUSH_CPU_DCACHE(va, page, size)        \
        do {    \
                unsigned long _pa_ = VA_PAGE_TO_PA(va, page);           \
                __cpuc_flush_dcache_area((void *)(va), (size_t)(size)); \
                outer_flush_range(_pa_, _pa_+(size_t)(size));           \
        } while (0)

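//
// The SMMU walks page tables in physical memory, so CPU-side updates must
// be cleaned out of both the L1 and the outer cache before the hardware
// can observe them (FLUSH_CPU_DCACHE above).  FLUSH_SMMU_REGS below drains
// posted register writes by reading back MC_SMMU_CONFIG_0.
//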
#define FLUSH_SMMU_REGS(smmu)   \
        do { wmb(); (void)readl((smmu)->regs + MC_SMMU_CONFIG_0); } while (0)

//
// Flush all TLB entries and all PTC entries
// Caller must lock smmu
//
static void smmu_flush_regs(struct smmu_device *smmu, int enable)
{
        writel(MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_MATCH_ALL |
                        MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_disable,
                smmu->regs + MC_SMMU_TLB_FLUSH_0);
        writel(MC_SMMU_PTC_FLUSH_0_PTC_FLUSH_TYPE_ALL,
                smmu->regs + MC_SMMU_PTC_FLUSH_0);

        if (enable)
                writel(MC_SMMU_CONFIG_0_SMMU_ENABLE_ENABLE,
                        smmu->regs + MC_SMMU_CONFIG_0);
        FLUSH_SMMU_REGS(smmu);
}

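//
// Program the SMMU registers from the in-memory state: per-AS page
// directory pointers, per-client ASID assignments, translation-enable
// and security registers.  Used at probe time and on resume.
//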
static void smmu_setup_regs(struct smmu_device *smmu)
{
        int i;

        if (smmu->as) {
                int asid;

                // Set/restore page directory for each AS
                for (asid = 0; asid < smmu->num_ases; asid++) {
                        struct smmu_as *as = &smmu->as[asid];

                        spin_lock(&smmu->lock);
                        writel(MC_SMMU_PTB_ASID_0_CURRENT_ASID(as->asid),
                                as->smmu->regs + MC_SMMU_PTB_ASID_0);
                        writel(as->pdir_page
                                ? SMMU_MK_PDIR(as->pdir_page, _PDIR_ATTR)
                                : MC_SMMU_PTB_DATA_0_RESET_VAL,
                                as->smmu->regs + MC_SMMU_PTB_DATA_0);
                        spin_unlock(&smmu->lock);
                }
        }

        // Set/restore ASID for each hardware client
        for (i = 0; i < HWC_COUNT; i++) {
                struct smmu_hwc_state *hwcst = &smmu->hwc_state[i];
                writel(hwcst->enable_disable, smmu->regs + hwcst->reg);
        }

        writel(smmu->translation_enable_0_0,
                smmu->regs + MC_SMMU_TRANSLATION_ENABLE_0_0);
        writel(smmu->translation_enable_1_0,
                smmu->regs + MC_SMMU_TRANSLATION_ENABLE_1_0);
        writel(smmu->translation_enable_2_0,
                smmu->regs + MC_SMMU_TRANSLATION_ENABLE_2_0);
        writel(smmu->asid_security_0,
                smmu->regs + MC_SMMU_ASID_SECURITY_0);
#ifdef HIT_MISS_STAT
        writel(
                MC_SMMU_TLB_CONFIG_0_TLB_STATS_enable |
                MC_SMMU_TLB_CONFIG_0_TLB_HIT_UNDER_MISS_enable |
                MC_SMMU_TLB_CONFIG_0_TLB_ACTIVE_LINES_value,
                smmu->regs + MC_SMMU_TLB_CONFIG_0);

        writel(
                MC_SMMU_PTC_CONFIG_0_PTC_STATS_enable |
                MC_SMMU_PTC_CONFIG_0_PTC_CACHE_enable |
                MC_SMMU_PTC_CONFIG_0_PTC_INDEX_MAP_pattern,
                smmu->regs + MC_SMMU_PTC_CONFIG_0);
#else
        writel(MC_SMMU_TLB_CONFIG_0_RESET_VAL,
                smmu->regs + MC_SMMU_TLB_CONFIG_0);
        writel(MC_SMMU_PTC_CONFIG_0_RESET_VAL,
                smmu->regs + MC_SMMU_PTC_CONFIG_0);
#endif

        smmu_flush_regs(smmu, 1);
}

static int smmu_suspend(struct tegra_iovmm_device *dev)
{
        struct smmu_device *smmu =
                container_of(dev, struct smmu_device, iovmm_dev);

        smmu->translation_enable_0_0 =
                readl(smmu->regs + MC_SMMU_TRANSLATION_ENABLE_0_0);
        smmu->translation_enable_1_0 =
                readl(smmu->regs + MC_SMMU_TRANSLATION_ENABLE_1_0);
        smmu->translation_enable_2_0 =
                readl(smmu->regs + MC_SMMU_TRANSLATION_ENABLE_2_0);
        smmu->asid_security_0 =
                readl(smmu->regs + MC_SMMU_ASID_SECURITY_0);
        return 0;
}

static void smmu_resume(struct tegra_iovmm_device *dev)
{
        struct smmu_device *smmu =
                container_of(dev, struct smmu_device, iovmm_dev);

        if (!smmu->enable)
                return;

        spin_lock(&smmu->lock);
        smmu_setup_regs(smmu);
        spin_unlock(&smmu->lock);
}

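//
// Free the page table backing the PDE that covers page_addr (if any)
// and mark the PDE vacant again.
//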
static void free_ptbl(struct smmu_as *as, unsigned long page_addr)
{
        unsigned long pdn = SMMU_ADDR_TO_PDN(page_addr);
        unsigned long *pdir = (unsigned long *)kmap(as->pdir_page);

        if (pdir[pdn] != _PDE_VACANT(pdn)) {
                ClearPageReserved(SMMU_EX_PTBL_PAGE(pdir[pdn]));
                __free_page(SMMU_EX_PTBL_PAGE(pdir[pdn]));
                pdir[pdn] = _PDE_VACANT(pdn);
                FLUSH_CPU_DCACHE(&pdir[pdn], as->pdir_page, sizeof pdir[pdn]);
        }
        kunmap(as->pdir_page);
}

static void free_pdir(struct smmu_as *as)
{
        if (as->pdir_page) {
                unsigned addr = as->smmu->iovmm_base;
                // One page table covers SMMU_PTBL_COUNT pages
                int count = DIV_ROUND_UP(as->smmu->page_count,
                                        SMMU_PTBL_COUNT);

                while (count-- > 0) {
                        free_ptbl(as, addr);
                        addr += SMMU_PAGE_SIZE * SMMU_PTBL_COUNT;
                }
                ClearPageReserved(as->pdir_page);
                __free_page(as->pdir_page);
                as->pdir_page = NULL;
                kfree(as->pte_count);
                as->pte_count = NULL;
        }
}

static int smmu_remove(struct platform_device *pdev)
{
        struct smmu_device *smmu = platform_get_drvdata(pdev);

        if (!smmu)
                return 0;

        if (smmu->enable) {
                writel(MC_SMMU_CONFIG_0_SMMU_ENABLE_DISABLE,
                        smmu->regs + MC_SMMU_CONFIG_0);
                smmu->enable = 0;
        }
        platform_set_drvdata(pdev, NULL);

        if (smmu->as) {
                int asid;

                for (asid = 0; asid < smmu->num_ases; asid++)
                        free_pdir(&smmu->as[asid]);
                kfree(smmu->as);
        }

        if (smmu->regs)
                iounmap(smmu->regs);
        tegra_iovmm_unregister(&smmu->iovmm_dev);
        kfree(smmu);
        return 0;
}

//
// Maps PTBL for given page_addr and returns the PTE address
// Caller must unmap the mapped PTBL returned in *ptbl_page_p
//
static smmu_pte_t *locate_pte(struct smmu_as *as,
                unsigned long page_addr, bool allocate,
                struct page **ptbl_page_p,
                unsigned int **pte_counter)
{
        unsigned long ptn = SMMU_ADDR_TO_PFN(page_addr);
        unsigned long pdn = SMMU_ADDR_TO_PDN(page_addr);
        smmu_pde_t *pdir = kmap(as->pdir_page);
        smmu_pte_t *ptbl;

        if (pdir[pdn] != _PDE_VACANT(pdn)) {
                // Mapped entry table already exists
                *ptbl_page_p = SMMU_EX_PTBL_PAGE(pdir[pdn]);
                ptbl = kmap(*ptbl_page_p);
        } else if (!allocate) {
                kunmap(as->pdir_page);
                return NULL;
        } else {
                // Vacant - allocate a new page table
                *ptbl_page_p = alloc_page(GFP_KERNEL|__GFP_DMA);
                if (!*ptbl_page_p) {
                        kunmap(as->pdir_page);
                        pr_err(DRIVER_NAME
                        ": failed to allocate tegra_iovmm_device page table\n");
                        return NULL;
                }
                SetPageReserved(*ptbl_page_p);
                ptbl = (unsigned long *)kmap(*ptbl_page_p);
                {
                        int pn;
                        unsigned long addr = SMMU_PDN_TO_ADDR(pdn);
                        for (pn = 0; pn < SMMU_PTBL_COUNT;
                                pn++, addr += SMMU_PAGE_SIZE) {
                                ptbl[pn] = _PTE_VACANT(addr);
                        }
                }
                FLUSH_CPU_DCACHE(ptbl, *ptbl_page_p, SMMU_PTBL_SIZE);
                pdir[pdn] = SMMU_MK_PDE(*ptbl_page_p, _PDE_ATTR_N);
                FLUSH_CPU_DCACHE(&pdir[pdn], as->pdir_page, sizeof pdir[pdn]);
        }
        *pte_counter = &as->pte_count[pdn];

        kunmap(as->pdir_page);
        return &ptbl[ptn % SMMU_PTBL_COUNT];
}

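//
// Invalidate the TLB entries for the VA group containing iova (matched
// against this AS's ASID) and the PTC entry caching the given PTE.
//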
static void flush_tlb_and_ptc(struct smmu_device *smmu,
                struct smmu_as *as, unsigned long iova,
                smmu_pte_t *pte, struct page *ptpage)
{
        writel(MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_MATCH_GROUP |
                ((iova & MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_GROUP_mask) >>
                        MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_VA_GROUP_shift) |
                MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_MATCH_enable |
                (as->asid << MC_SMMU_TLB_FLUSH_0_TLB_FLUSH_ASID_SHIFT),
                smmu->regs + MC_SMMU_TLB_FLUSH_0);
        writel(MC_SMMU_PTC_FLUSH_0_PTC_FLUSH_TYPE_ADR |
                VA_PAGE_TO_PA(pte, ptpage),
                smmu->regs + MC_SMMU_PTC_FLUSH_0);
        FLUSH_SMMU_REGS(smmu);
}

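//
// Map an iovma: for each page, pin it via lock_makeresident, install the
// PTE, and flush the CPU cache, TLB and PTC.  On failure, unwind the
// pages mapped so far and release their pins.
//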
static int smmu_map(struct tegra_iovmm_domain *domain,
                struct tegra_iovmm_area *iovma)
{
        struct smmu_as *as = container_of(domain, struct smmu_as, domain);
        unsigned long addr = iovma->iovm_start;
        unsigned long pcount = iovma->iovm_length >> SMMU_PAGE_SHIFT;
        int i;

        for (i = 0; i < pcount; i++) {
                unsigned long pfn;
                smmu_pte_t *pte;
                unsigned int *pte_counter;
                struct page *ptpage;

                pfn = iovma->ops->lock_makeresident(iovma, i<<PAGE_SHIFT);
                if (!pfn_valid(pfn))
                        goto fail;

                down(&as->sem);

                if (!(pte = locate_pte(as, addr, true, &ptpage, &pte_counter)))
                        goto fail2;

                if (*pte == _PTE_VACANT(addr))
                        (*pte_counter)++;
                *pte = SMMU_PFN_TO_PTE(pfn, as->pte_attr);
                if (unlikely((*pte == _PTE_VACANT(addr))))
                        (*pte_counter)--;
                FLUSH_CPU_DCACHE(pte, ptpage, sizeof *pte);
                kunmap(ptpage);
                up(&as->sem);
                flush_tlb_and_ptc(as->smmu, as, addr, pte, ptpage);
                addr += SMMU_PAGE_SIZE;
        }
        return 0;

fail:
        down(&as->sem);
fail2:
        while (i-- > 0) {
                smmu_pte_t *pte;
                unsigned int *pte_counter;
                struct page *page;

                iovma->ops->release(iovma, i<<PAGE_SHIFT);
                addr -= SMMU_PAGE_SIZE;
                if ((pte = locate_pte(as, addr, false, &page, &pte_counter))) {
                        if (*pte != _PTE_VACANT(addr)) {
                                *pte = _PTE_VACANT(addr);
                                FLUSH_CPU_DCACHE(pte, page, sizeof *pte);
                                kunmap(page);
                                if (!--(*pte_counter))
                                        free_ptbl(as, addr);
                        } else {
                                kunmap(page);
                        }
                }
        }
        up(&as->sem);
        return -ENOMEM;
}

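//
// Unmap an iovma: release each page's pin and mark its PTE vacant; with
// decommit, also free page tables that become empty.
//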
static void smmu_unmap(struct tegra_iovmm_domain *domain,
        struct tegra_iovmm_area *iovma, bool decommit)
{
        struct smmu_as *as = container_of(domain, struct smmu_as, domain);
        unsigned long addr = iovma->iovm_start;
        unsigned int pcount = iovma->iovm_length >> SMMU_PAGE_SHIFT;
        unsigned int i, *pte_counter;

        down(&as->sem);
        for (i = 0; i < pcount; i++) {
                unsigned long *pte;
                struct page *page;

                if (iovma->ops && iovma->ops->release)
                        iovma->ops->release(iovma, i<<PAGE_SHIFT);

                if ((pte = locate_pte(as, addr, false, &page, &pte_counter))) {
                        if (*pte != _PTE_VACANT(addr)) {
                                *pte = _PTE_VACANT(addr);
                                FLUSH_CPU_DCACHE(pte, page, sizeof *pte);
                                kunmap(page);
                                if (!--(*pte_counter) && decommit) {
                                        free_ptbl(as, addr);
                                        smmu_flush_regs(as->smmu, 0);
                                }
                        } else {
                                kunmap(page);
                        }
                }
                addr += SMMU_PAGE_SIZE;
        }
        up(&as->sem);
}

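//
// Map a single pfn at the given I/O virtual address
//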
static void smmu_map_pfn(struct tegra_iovmm_domain *domain,
        struct tegra_iovmm_area *iovma, tegra_iovmm_addr_t addr,
        unsigned long pfn)
{
        struct smmu_as *as = container_of(domain, struct smmu_as, domain);
        struct smmu_device *smmu = as->smmu;
        smmu_pte_t *pte;
        unsigned int *pte_counter;
        struct page *ptpage;

        BUG_ON(!pfn_valid(pfn));
        down(&as->sem);
        if ((pte = locate_pte(as, addr, true, &ptpage, &pte_counter))) {
                if (*pte == _PTE_VACANT(addr))
                        (*pte_counter)++;
                *pte = SMMU_PFN_TO_PTE(pfn, as->pte_attr);
                if (unlikely((*pte == _PTE_VACANT(addr))))
                        (*pte_counter)--;
                FLUSH_CPU_DCACHE(pte, ptpage, sizeof *pte);
                wmb();

                kunmap(ptpage);
                flush_tlb_and_ptc(smmu, as, addr, pte, ptpage);
        }
        up(&as->sem);
}

//
// Caller must lock/unlock as
//
static int alloc_pdir(struct smmu_as *as)
{
        unsigned long *pdir;
        int pdn;

        if (as->pdir_page)
                return 0;

        as->pte_count = kzalloc(sizeof(as->pte_count[0]) * SMMU_PDIR_COUNT,
                                GFP_KERNEL);
        if (!as->pte_count) {
                pr_err(DRIVER_NAME
                ": failed to allocate tegra_iovmm_device PTE counters\n");
                return -ENOMEM;
        }
        as->pdir_page = alloc_page(GFP_KERNEL|__GFP_DMA);
        if (!as->pdir_page) {
                pr_err(DRIVER_NAME
                ": failed to allocate tegra_iovmm_device page directory\n");
                kfree(as->pte_count);
                as->pte_count = NULL;
                return -ENOMEM;
        }
        SetPageReserved(as->pdir_page);
        pdir = kmap(as->pdir_page);

        for (pdn = 0; pdn < SMMU_PDIR_COUNT; pdn++)
                pdir[pdn] = _PDE_VACANT(pdn);
        FLUSH_CPU_DCACHE(pdir, as->pdir_page, SMMU_PDIR_SIZE);
        kunmap(as->pdir_page);

        return 0;
}

static void _sysfs_create(struct smmu_as *as, struct device *sysfs_parent);

//
// Allocate resources for an AS
//      TODO: split into "alloc" and "lock"
//
static struct tegra_iovmm_domain *smmu_alloc_domain(
        struct tegra_iovmm_device *dev, struct tegra_iovmm_client *client)
{
        struct smmu_device *smmu =
                container_of(dev, struct smmu_device, iovmm_dev);
        struct smmu_as *as = NULL;
        const struct domain_hwc_map *map = NULL;
        int asid, i;

        // Look for a free AS
        for (asid = smmu->lowest_asid; asid < smmu->num_ases; asid++) {
                down(&smmu->as[asid].sem);
                if (!smmu->as[asid].hwclients) {
                        as = &smmu->as[asid];
                        break;
                }
                up(&smmu->as[asid].sem);
        }

        if (!as) {
                pr_err(DRIVER_NAME ": no free AS\n");
                return NULL;
        }

        if (alloc_pdir(as) < 0)
                goto bad3;

        // Look for a matching hardware client group
        for (i = 0; i < ARRAY_SIZE(smmu_hwc_map); i++) {
                if (!strcmp(smmu_hwc_map[i].dev_name, client->misc_dev->name)) {
                        map = &smmu_hwc_map[i];
                        break;
                }
        }

        if (!map) {
                pr_err(DRIVER_NAME ": no SMMU resource for %s (%s)\n",
                        client->name, client->misc_dev->name);
                goto bad2;
        }

        spin_lock(&smmu->lock);
        // Update PDIR register
        writel(MC_SMMU_PTB_ASID_0_CURRENT_ASID(as->asid),
                as->smmu->regs + MC_SMMU_PTB_ASID_0);
        writel(SMMU_MK_PDIR(as->pdir_page, _PDIR_ATTR),
                as->smmu->regs + MC_SMMU_PTB_DATA_0);
        FLUSH_SMMU_REGS(smmu);

        // Put each hardware client in the group into the address space
        for (i = 0; i < map->nr_hwcs; i++) {
                struct smmu_hwc_state *hwcst = &smmu->hwc_state[map->hwcs[i]];

                // Is the hardware client busy?
                if (hwcst->enable_disable != SMMU_ASID_DISABLE &&
                        hwcst->enable_disable != SMMU_ASID_ENABLE(as->asid)) {
                        pr_err(DRIVER_NAME
                                ": HW 0x%lx busy for ASID %ld (client!=%s)\n",
                                hwcst->reg,
                                SMMU_ASID_ASID(hwcst->enable_disable),
                                client->name);
                        goto bad;
                }
                hwcst->enable_disable = SMMU_ASID_ENABLE(as->asid);
                writel(hwcst->enable_disable, smmu->regs + hwcst->reg);
        }
        FLUSH_SMMU_REGS(smmu);
        spin_unlock(&smmu->lock);

        as->hwclients = map;
        _sysfs_create(as, client->misc_dev->this_device);
        up(&as->sem);
        return &as->domain;

bad:
        // Reset hardware clients that have been enabled
        while (--i >= 0) {
                struct smmu_hwc_state *hwcst = &smmu->hwc_state[map->hwcs[i]];

                hwcst->enable_disable = SMMU_ASID_DISABLE;
                writel(hwcst->enable_disable, smmu->regs + hwcst->reg);
        }
        FLUSH_SMMU_REGS(smmu);
        spin_unlock(&as->smmu->lock);
bad2:
        free_pdir(as);
bad3:
        up(&as->sem);
        return NULL;
}

//
// Release resources for an AS
//      TODO: split into "unlock" and "free"
//
static void smmu_free_domain(
        struct tegra_iovmm_domain *domain, struct tegra_iovmm_client *client)
{
        struct smmu_as *as = container_of(domain, struct smmu_as, domain);
        struct smmu_device *smmu = as->smmu;
        const struct domain_hwc_map *map = NULL;
        int i;

        down(&as->sem);
        map = as->hwclients;

        spin_lock(&smmu->lock);
        for (i = 0; i < map->nr_hwcs; i++) {
                struct smmu_hwc_state *hwcst = &smmu->hwc_state[map->hwcs[i]];

                hwcst->enable_disable = SMMU_ASID_DISABLE;
                writel(SMMU_ASID_DISABLE, smmu->regs + hwcst->reg);
        }
        FLUSH_SMMU_REGS(smmu);
        spin_unlock(&smmu->lock);

        as->hwclients = NULL;
        if (as->pdir_page) {
                spin_lock(&smmu->lock);
                writel(MC_SMMU_PTB_ASID_0_CURRENT_ASID(as->asid),
                        smmu->regs + MC_SMMU_PTB_ASID_0);
                writel(MC_SMMU_PTB_DATA_0_RESET_VAL,
                        smmu->regs + MC_SMMU_PTB_DATA_0);
                FLUSH_SMMU_REGS(smmu);
                spin_unlock(&smmu->lock);

                free_pdir(as);
        }
        up(&as->sem);
}

static struct tegra_iovmm_device_ops tegra_iovmm_smmu_ops = {
        .map = smmu_map,
        .unmap = smmu_unmap,
        .map_pfn = smmu_map_pfn,
        .alloc_domain = smmu_alloc_domain,
        .free_domain = smmu_free_domain,
        .suspend = smmu_suspend,
        .resume = smmu_resume,
};

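//
// Probe expects two MEM resources: [0] the MC register aperture and
// [1] the remappable IOVA window.  It registers with the IOVMM layer,
// allocates one smmu_as per ASID, and programs the hardware.
//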
static int smmu_probe(struct platform_device *pdev)
{
        struct smmu_device *smmu = NULL;
        struct resource *regs = NULL, *window = NULL;
        int e, asid;

        if (!pdev) {
                pr_err(DRIVER_NAME ": platform_device required\n");
                return -ENODEV;
        }

        if (PAGE_SHIFT != SMMU_PAGE_SHIFT) {
                pr_err(DRIVER_NAME ": SMMU and CPU page sizes must match\n");
                return -ENXIO;
        }

        if (ARRAY_SIZE(smmu_hwc_state_init) != HWC_COUNT) {
                pr_err(DRIVER_NAME
                        ": sizeof smmu_hwc_state_init != enum smmu_hwclient\n");
                return -ENXIO;
        }

        regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        window = platform_get_resource(pdev, IORESOURCE_MEM, 1);

        if (!regs || !window) {
                pr_err(DRIVER_NAME ": No SMMU resources\n");
                return -ENODEV;
        }
        smmu = kzalloc(sizeof(*smmu), GFP_KERNEL);
        if (!smmu) {
                pr_err(DRIVER_NAME ": failed to allocate smmu_device\n");
                return -ENOMEM;
        }

        smmu->num_ases = MC_SMMU_NUM_ASIDS;
        smmu->iovmm_base = (tegra_iovmm_addr_t)window->start;
        smmu->page_count = (window->end + 1 - window->start) >> SMMU_PAGE_SHIFT;
        smmu->regs = ioremap(regs->start, regs->end + 1 - regs->start);
        if (!smmu->regs) {
                pr_err(DRIVER_NAME ": failed to remap SMMU registers\n");
                e = -ENXIO;
                goto fail;
        }

        smmu->translation_enable_0_0 = ~0;
        smmu->translation_enable_1_0 = ~0;
        smmu->translation_enable_2_0 = ~0;
        smmu->asid_security_0        = 0;

        memcpy(smmu->hwc_state, smmu_hwc_state_init, sizeof(smmu->hwc_state));

        smmu->iovmm_dev.name = VMM_NAME;
        smmu->iovmm_dev.ops = &tegra_iovmm_smmu_ops;
        smmu->iovmm_dev.pgsize_bits = SMMU_PAGE_SHIFT;

        e = tegra_iovmm_register(&smmu->iovmm_dev);
        if (e)
                goto fail;

        smmu->as = kzalloc(sizeof(smmu->as[0]) * smmu->num_ases, GFP_KERNEL);
        if (!smmu->as) {
                pr_err(DRIVER_NAME ": failed to allocate smmu_as\n");
                e = -ENOMEM;
                goto fail;
        }

        // Initialize address space structure array
        for (asid = 0; asid < smmu->num_ases; asid++) {
                struct smmu_as *as = &smmu->as[asid];

                as->smmu = smmu;
                as->asid = asid;
                as->pte_attr = _PTE_ATTR;       // Default attributes

                sema_init(&as->sem, 1);

                e = tegra_iovmm_domain_init(&as->domain, &smmu->iovmm_dev,
                        smmu->iovmm_base,
                        smmu->iovmm_base +
                                (smmu->page_count << SMMU_PAGE_SHIFT));
                if (e)
                        goto fail;
        }
        spin_lock_init(&smmu->lock);
        smmu_setup_regs(smmu);
        smmu->enable = 1;
        platform_set_drvdata(pdev, smmu);
        return 0;

fail:
        if (smmu->regs)
                iounmap(smmu->regs);
        if (smmu->as) {
                for (asid = 0; asid < smmu->num_ases; asid++) {
                        if (smmu->as[asid].pdir_page) {
                                ClearPageReserved(smmu->as[asid].pdir_page);
                                __free_page(smmu->as[asid].pdir_page);
                        }
                }
                kfree(smmu->as);
        }
        kfree(smmu);
        return e;
}

static struct platform_driver tegra_iovmm_smmu_drv = {
        .probe = smmu_probe,
        .remove = smmu_remove,
        .driver = {
                .name = DRIVER_NAME,
        },
};
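
//
// For reference, a board file would register this driver roughly as in
// the sketch below.  The addresses are hypothetical placeholders; the
// real register base and IOVA window come from the Tegra3 memory map.
//
//      static struct resource tegra_smmu_resources[] = {
//              [0] = {         /* MC register aperture */
//                      .start  = 0x7000f010,   /* hypothetical */
//                      .end    = 0x7000f2ff,   /* hypothetical */
//                      .flags  = IORESOURCE_MEM,
//              },
//              [1] = {         /* remappable IOVA window */
//                      .start  = 0x40000000,   /* hypothetical */
//                      .end    = 0x7fffffff,   /* hypothetical */
//                      .flags  = IORESOURCE_MEM,
//              },
//      };
//
//      static struct platform_device tegra_smmu_device = {
//              .name           = "tegra_smmu",
//              .id             = -1,
//              .resource       = tegra_smmu_resources,
//              .num_resources  = ARRAY_SIZE(tegra_smmu_resources),
//      };
//
//      /* in board init code: */
//      platform_device_register(&tegra_smmu_device);
//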

static int __init smmu_init(void)
{
        return platform_driver_register(&tegra_iovmm_smmu_drv);
}

static void __exit smmu_exit(void)
{
        platform_driver_unregister(&tegra_iovmm_smmu_drv);
}

subsys_initcall(smmu_init);
module_exit(smmu_exit);

#ifdef SMMU_SYSFS
//
// SMMU-global sysfs interface for debugging
//
static ssize_t _sysfs_show_reg(struct device *d,
                                struct device_attribute *da, char *buf);
static ssize_t _sysfs_store_reg(struct device *d,
                                struct device_attribute *da, const char *buf,
                                size_t count);

#define _NAME_MAP(_name)        {       \
        .name = __stringify(_name),     \
        .offset = _name##_0,            \
        .dev_attr = __ATTR(_name, S_IRUGO|S_IWUSR,      \
                        _sysfs_show_reg, _sysfs_store_reg)      \
}

static struct _reg_name_map {
        const char *name;
        unsigned        offset;
        struct device_attribute dev_attr;
} _smmu_reg_name_map[] = {
        _NAME_MAP(MC_SMMU_CONFIG),
        _NAME_MAP(MC_SMMU_TLB_CONFIG),
        _NAME_MAP(MC_SMMU_PTC_CONFIG),
        _NAME_MAP(MC_SMMU_PTB_ASID),
        _NAME_MAP(MC_SMMU_PTB_DATA),
        _NAME_MAP(MC_SMMU_TLB_FLUSH),
        _NAME_MAP(MC_SMMU_PTC_FLUSH),
        _NAME_MAP(MC_SMMU_ASID_SECURITY),
        _NAME_MAP(MC_SMMU_STATS_TLB_HIT_COUNT),
        _NAME_MAP(MC_SMMU_STATS_TLB_MISS_COUNT),
        _NAME_MAP(MC_SMMU_STATS_PTC_HIT_COUNT),
        _NAME_MAP(MC_SMMU_STATS_PTC_MISS_COUNT),
        _NAME_MAP(MC_SMMU_TRANSLATION_ENABLE_0),
        _NAME_MAP(MC_SMMU_TRANSLATION_ENABLE_1),
        _NAME_MAP(MC_SMMU_TRANSLATION_ENABLE_2),
        _NAME_MAP(MC_SMMU_AFI_ASID),
        _NAME_MAP(MC_SMMU_AVPC_ASID),
        _NAME_MAP(MC_SMMU_DC_ASID),
        _NAME_MAP(MC_SMMU_DCB_ASID),
        _NAME_MAP(MC_SMMU_EPP_ASID),
        _NAME_MAP(MC_SMMU_G2_ASID),
        _NAME_MAP(MC_SMMU_HC_ASID),
        _NAME_MAP(MC_SMMU_HDA_ASID),
        _NAME_MAP(MC_SMMU_ISP_ASID),
        _NAME_MAP(MC_SMMU_MPE_ASID),
        _NAME_MAP(MC_SMMU_NV_ASID),
        _NAME_MAP(MC_SMMU_NV2_ASID),
        _NAME_MAP(MC_SMMU_PPCS_ASID),
        _NAME_MAP(MC_SMMU_SATA_ASID),
        _NAME_MAP(MC_SMMU_VDE_ASID),
        _NAME_MAP(MC_SMMU_VI_ASID),
};

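//
// Each register above appears as a file named after its macro under the
// "smmu" sysfs device node, e.g. (the full path depends on the parent
// device):
//      cat /sys/devices/.../smmu/MC_SMMU_CONFIG
// Reads print the register in hex; writes store a hex value.
//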
static struct attribute *_smmu_attrs[ARRAY_SIZE(_smmu_reg_name_map) + 3];
static struct attribute_group _smmu_attr_group = {
        .attrs = _smmu_attrs
};

static ssize_t lookup_reg(struct device_attribute *da)
{
        int i;
        for (i = 0; i < ARRAY_SIZE(_smmu_reg_name_map); i++) {
                if (!strcmp(_smmu_reg_name_map[i].name, da->attr.name))
                        return _smmu_reg_name_map[i].offset;
        }
        return -ENODEV;
}

static ssize_t _sysfs_show_reg(struct device *d,
                                        struct device_attribute *da, char *buf)
{
        struct smmu_device *smmu =
                container_of(d, struct smmu_device, sysfs_dev);
        ssize_t offset = lookup_reg(da);

        if (offset < 0)
                return offset;
        return sprintf(buf, "%08lx\n",
                (unsigned long)readl(smmu->regs + offset));
}

static ssize_t _sysfs_store_reg(struct device *d,
                        struct device_attribute *da,
                        const char *buf, size_t count)
{
        struct smmu_device *smmu =
                container_of(d, struct smmu_device, sysfs_dev);
        ssize_t offset = lookup_reg(da);
        unsigned long value;

        if (offset < 0)
                return offset;
        value = simple_strtoul(buf, NULL, 16);
        writel(value, smmu->regs + offset);
        return count;
}

static ssize_t _sysfs_show_smmu(struct device *d,
                                struct device_attribute *da, char *buf)
{
        struct smmu_device *smmu =
                container_of(d, struct smmu_device, sysfs_dev);
        ssize_t rv = 0;

        rv += sprintf(buf + rv, "      regs: %p\n", smmu->regs);
        rv += sprintf(buf + rv, "iovmm_base: %p\n", (void *)smmu->iovmm_base);
        rv += sprintf(buf + rv, "page_count: %lx\n", smmu->page_count);
        rv += sprintf(buf + rv, "  num_ases: %d\n", smmu->num_ases);
        rv += sprintf(buf + rv, "        as: %p\n", smmu->as);
        rv += sprintf(buf + rv, "    enable: %s\n",
                        smmu->enable ? "yes" : "no");
        return rv;
}

static struct device_attribute _attr_show_smmu
                 = __ATTR(show_smmu, S_IRUGO, _sysfs_show_smmu, NULL);

static ssize_t _sysfs_show_lowest_asid(struct device *d,
                                struct device_attribute *da, char *buf)
{
        struct smmu_device *smmu =
                container_of(d, struct smmu_device, sysfs_dev);
        ssize_t rv = 0;

        rv += sprintf(buf + rv, "%d\n", smmu->lowest_asid);
        return rv;
}

static ssize_t _sysfs_set_lowest_asid(struct device *d,
                                struct device_attribute *da,
                                const char *buf, size_t count)
{
        struct smmu_device *smmu =
                container_of(d, struct smmu_device, sysfs_dev);
        int value = simple_strtoul(buf, NULL, 10);
        if (0 <= value && value < MC_SMMU_NUM_ASIDS)
                smmu->lowest_asid = value;
        return count;
}

static struct device_attribute _attr_lowest_asid
                 = __ATTR(lowest_asid, S_IRUGO|S_IWUSR, _sysfs_show_lowest_asid,
                        _sysfs_set_lowest_asid);

static void _sysfs_smmu(struct smmu_device *smmu, struct device *parent)
{
        int i;

        if (smmu->sysfs_use_count++ > 0)
                return;
        for (i = 0; i < ARRAY_SIZE(_smmu_reg_name_map); i++)
                _smmu_attrs[i] = &_smmu_reg_name_map[i].dev_attr.attr;
        _smmu_attrs[i++] = &_attr_show_smmu.attr;
        _smmu_attrs[i++] = &_attr_lowest_asid.attr;
        _smmu_attrs[ARRAY_SIZE(_smmu_attrs) - 1] = NULL;

        dev_set_name(&smmu->sysfs_dev, "smmu");
        smmu->sysfs_dev.parent = parent;
        smmu->sysfs_dev.driver = NULL;
        smmu->sysfs_dev.release = NULL;
        if (device_register(&smmu->sysfs_dev)) {
                pr_err("%s: failed to register smmu_sysfs_dev\n", __func__);
                smmu->sysfs_use_count--;
                return;
        }
        if (sysfs_create_group(&smmu->sysfs_dev.kobj, &_smmu_attr_group)) {
                pr_err("%s: failed to create group for smmu_sysfs_dev\n",
                        __func__);
                smmu->sysfs_use_count--;
                return;
        }
}
#endif

static void _sysfs_create(struct smmu_as *as, struct device *parent)
{
#ifdef SMMU_SYSFS
        _sysfs_smmu(as->smmu, parent);
#endif
}