iommu: tegra: correct device attach/detach
linux-3.10.git: drivers/iommu/tegra-smmu.c
1 /*
2  * IOMMU driver for SMMU on Tegra 3 series SoCs and later.
3  *
4  * Copyright (c) 2011-2013, NVIDIA CORPORATION.  All rights reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along with
16  * this program; if not, write to the Free Software Foundation, Inc.,
17  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18  */
19
20 #define pr_fmt(fmt)     "%s(): " fmt, __func__
21
22 #include <linux/module.h>
23 #include <linux/platform_device.h>
24 #include <linux/spinlock.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/mm.h>
28 #include <linux/pagemap.h>
29 #include <linux/device.h>
30 #include <linux/sched.h>
31 #include <linux/iommu.h>
32 #include <linux/io.h>
33 #include <linux/debugfs.h>
34 #include <linux/seq_file.h>
35 #include <linux/tegra-ahb.h>
36 #include <linux/of.h>
37 #include <linux/of_iommu.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/bitops.h>
41 #include <linux/tegra-soc.h>
42
43 #include <asm/page.h>
44 #include <asm/cacheflush.h>
45 #include <asm/dma-iommu.h>
46
47 #include <mach/tegra_smmu.h>
48 #include <mach/tegra-swgid.h>
49
50 /* HACK! This needs to come from device tree */
51 #include "../../arch/arm/mach-tegra/iomap.h"
52
53 /* bitmap of the page sizes currently supported */
54 #define SMMU_IOMMU_PGSIZES      (SZ_4K | SZ_4M)
55
56 #define SMMU_CONFIG                             0x10
57 #define SMMU_CONFIG_DISABLE                     0
58 #define SMMU_CONFIG_ENABLE                      1
59
60 enum {
61         _TLB = 0,
62         _PTC,
63 };
64
65 #define SMMU_CACHE_CONFIG_BASE                  0x14
66 #define __SMMU_CACHE_CONFIG(mc, cache)          (SMMU_CACHE_CONFIG_BASE + 4 * cache)
67 #define SMMU_CACHE_CONFIG(cache)                __SMMU_CACHE_CONFIG(_MC, cache)
68
69 #define SMMU_CACHE_CONFIG_STATS_SHIFT           31
70 #define SMMU_CACHE_CONFIG_STATS_ENABLE          (1 << SMMU_CACHE_CONFIG_STATS_SHIFT)
71 #define SMMU_CACHE_CONFIG_STATS_TEST_SHIFT      30
72 #define SMMU_CACHE_CONFIG_STATS_TEST            (1 << SMMU_CACHE_CONFIG_STATS_TEST_SHIFT)
73
74 #define SMMU_TLB_CONFIG_HIT_UNDER_MISS__ENABLE  (1 << 29)
75 #define SMMU_TLB_CONFIG_ACTIVE_LINES__VALUE     0x10
76 #define SMMU_TLB_CONFIG_RESET_VAL               0x20000000
77 #define SMMU_TLB_RR_ARB                         (1 << 28)
78
79 #define SMMU_PTC_CONFIG_CACHE__ENABLE           (1 << 29)
80 #define SMMU_PTC_CONFIG_INDEX_MAP__PATTERN      0x3f
81 #define SMMU_PTC_CONFIG_RESET_VAL               0x2000003f
82 #define SMMU_PTC_REQ_LIMIT                      (8 << 24)
83
84 #define SMMU_PTB_ASID                           0x1c
85 #define SMMU_PTB_ASID_CURRENT_SHIFT             0
86
87 #define SMMU_PTB_DATA                           0x20
88 #define SMMU_PTB_DATA_RESET_VAL                 0
89 #define SMMU_PTB_DATA_ASID_NONSECURE_SHIFT      29
90 #define SMMU_PTB_DATA_ASID_WRITABLE_SHIFT       30
91 #define SMMU_PTB_DATA_ASID_READABLE_SHIFT       31
92
93 #define SMMU_TLB_FLUSH                          0x30
94 #define SMMU_TLB_FLUSH_VA_MATCH_ALL             0
95 #define SMMU_TLB_FLUSH_VA_MATCH_SECTION         2
96 #define SMMU_TLB_FLUSH_VA_MATCH_GROUP           3
97 #define SMMU_TLB_FLUSH_ASID_SHIFT_BASE          31
98 #define SMMU_TLB_FLUSH_ASID_MATCH_DISABLE       0
99 #define SMMU_TLB_FLUSH_ASID_MATCH_ENABLE        1
100 #define SMMU_TLB_FLUSH_ASID_MATCH_SHIFT         31
101 #define SMMU_TLB_FLUSH_ASID_ENABLE                                      \
102         (SMMU_TLB_FLUSH_ASID_MATCH_ENABLE << SMMU_TLB_FLUSH_ASID_MATCH_SHIFT)
103
104 #define SMMU_TLB_FLUSH_ASID_SHIFT(as)           \
105         (SMMU_TLB_FLUSH_ASID_SHIFT_BASE - __ffs((as)->smmu->num_as))
106 #define SMMU_ASID_MASK          ((1 << __ffs((as)->smmu->num_as)) - 1)
107
108 #define SMMU_PTC_FLUSH                          0x34
109 #define SMMU_PTC_FLUSH_TYPE_ALL                 0
110 #define SMMU_PTC_FLUSH_TYPE_ADR                 1
111 #define SMMU_PTC_FLUSH_ADR_SHIFT                4
112
113 #define SMMU_PTC_FLUSH_1                        0x9b8
114
115 #define SMMU_ASID_SECURITY                      0x38
116 #define SMMU_ASID_SECURITY_1                    0x3c
117 #define SMMU_ASID_SECURITY_2                    0x9e0
118 #define SMMU_ASID_SECURITY_3                    0x9e4
119 #define SMMU_ASID_SECURITY_4                    0x9e8
120 #define SMMU_ASID_SECURITY_5                    0x9ec
121 #define SMMU_ASID_SECURITY_6                    0x9f0
122 #define SMMU_ASID_SECURITY_7                    0x9f4
123
124 #define SMMU_STATS_CACHE_COUNT_BASE             0x1f0
125
126 #define SMMU_STATS_CACHE_COUNT(mc, cache, hitmiss)              \
127         (SMMU_STATS_CACHE_COUNT_BASE + 8 * cache + 4 * hitmiss)
128
129 #define SMMU_TRANSLATION_ENABLE_0               0x228
130
131 #define SMMU_AFI_ASID   0x238   /* PCIE */
132
133 #define SMMU_SWGRP_ASID_BASE    SMMU_AFI_ASID
134
135 #define HWGRP_COUNT     64
136
137 #define SMMU_PDE_NEXT_SHIFT             28
138
139 /* AHB Arbiter Registers */
140 #define AHB_XBAR_CTRL                           0xe0
141 #define AHB_XBAR_CTRL_SMMU_INIT_DONE_DONE       1
142 #define AHB_XBAR_CTRL_SMMU_INIT_DONE_SHIFT      17
143
144 #define SMMU_NUM_ASIDS                          4
145 #define SMMU_NUM_ASIDS_TEGRA12                  128
146 #define SMMU_TLB_FLUSH_VA_SECTION__MASK         0xffc00000
147 #define SMMU_TLB_FLUSH_VA_SECTION__SHIFT        12 /* right shift */
148 #define SMMU_TLB_FLUSH_VA_GROUP__MASK           0xffffc000
149 #define SMMU_TLB_FLUSH_VA_GROUP__SHIFT          12 /* right shift */
150 #define SMMU_TLB_FLUSH_VA(iova, which)  \
151         ((((iova) & SMMU_TLB_FLUSH_VA_##which##__MASK) >> \
152                 SMMU_TLB_FLUSH_VA_##which##__SHIFT) |   \
153         SMMU_TLB_FLUSH_VA_MATCH_##which)
154 #define SMMU_PTB_ASID_CUR(n)    \
155                 ((n) << SMMU_PTB_ASID_CURRENT_SHIFT)
156
157 #define SMMU_TLB_FLUSH_ALL 0
158
159 #define SMMU_TLB_FLUSH_ASID_MATCH_disable               \
160                 (SMMU_TLB_FLUSH_ASID_MATCH_DISABLE <<   \
161                         SMMU_TLB_FLUSH_ASID_MATCH_SHIFT)
162 #define SMMU_TLB_FLUSH_ASID_MATCH__ENABLE               \
163                 (SMMU_TLB_FLUSH_ASID_MATCH_ENABLE <<    \
164                         SMMU_TLB_FLUSH_ASID_MATCH_SHIFT)
165
166 #define SMMU_PAGE_SHIFT 12
167 #define SMMU_PAGE_SIZE  (1 << SMMU_PAGE_SHIFT)
168
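/*
 * Two-level page tables: a 1024-entry page directory of 4MB sections,
 * each of which may point to a 1024-entry page table of 4KB pages.
 * An iova therefore splits into a 10-bit page directory index (PDN),
 * a 10-bit page table index (PTN) and a 12-bit page offset.
 */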
169 #define SMMU_PDIR_COUNT 1024
170 #define SMMU_PDIR_SIZE  (sizeof(u32) * SMMU_PDIR_COUNT)
171 #define SMMU_PTBL_COUNT 1024
172 #define SMMU_PTBL_SIZE  (sizeof(u32) * SMMU_PTBL_COUNT)
173 #define SMMU_PDIR_SHIFT 12
174 #define SMMU_PDE_SHIFT  12
175 #define SMMU_PTE_SHIFT  12
176 #define SMMU_PFN_MASK   0x0fffffff
177
178 #define SMMU_ADDR_TO_PTN(addr)  (((addr) >> 12) & (BIT(10) - 1))
179 #define SMMU_ADDR_TO_PDN(addr)  ((addr) >> 22)
180 #define SMMU_PDN_TO_ADDR(pdn)   ((pdn) << 22)
181
182 #define _READABLE       (1 << SMMU_PTB_DATA_ASID_READABLE_SHIFT)
183 #define _WRITABLE       (1 << SMMU_PTB_DATA_ASID_WRITABLE_SHIFT)
184 #define _NONSECURE      (1 << SMMU_PTB_DATA_ASID_NONSECURE_SHIFT)
185 #define _PDE_NEXT       (1 << SMMU_PDE_NEXT_SHIFT)
186 #define _MASK_ATTR      (_READABLE | _WRITABLE | _NONSECURE)
187
188 #define _PDIR_ATTR      (_READABLE | _WRITABLE | _NONSECURE)
189
190 #define _PDE_ATTR       (_READABLE | _WRITABLE | _NONSECURE)
191 #define _PDE_ATTR_N     (_PDE_ATTR | _PDE_NEXT)
192 #define _PDE_VACANT(pdn)        (0)
193
194 #define _PTE_ATTR       (_READABLE | _WRITABLE | _NONSECURE)
195 #define _PTE_VACANT(addr)       (0)
196
197 #ifdef  CONFIG_TEGRA_IOMMU_SMMU_LINEAR
198 #undef  _PDE_VACANT
199 #undef  _PTE_VACANT
200 #define _PDE_VACANT(pdn)        (((pdn) << 10) | _PDE_ATTR)
201 #define _PTE_VACANT(addr)       (((addr) >> SMMU_PAGE_SHIFT) | _PTE_ATTR)
202 #endif
203
204 #define SMMU_MK_PDIR(page, attr)        \
205                 ((page_to_phys(page) >> SMMU_PDIR_SHIFT) | (attr))
206 #define SMMU_MK_PDE(page, attr)         \
207                 (u32)((page_to_phys(page) >> SMMU_PDE_SHIFT) | (attr))
208 #define SMMU_EX_PTBL_PAGE(pde)          \
209                 pfn_to_page((u32)(pde) & SMMU_PFN_MASK)
210 #define SMMU_PFN_TO_PTE(pfn, attr)      (u32)((pfn) | (attr))
211
212 #define SMMU_ASID_ENABLE(asid)  ((asid) | (1 << 31))
213 #define SMMU_ASID_DISABLE       0
214 #define SMMU_ASID_ASID(n)       ((n) & ~SMMU_ASID_ENABLE(0))
215
216 /* FIXME: client ID, only valid for T124 */
217 #define CSR_PTCR 0
218 #define CSR_DISPLAY0A 1
219 #define CSR_DISPLAY0AB 2
220 #define CSR_DISPLAY0B 3
221 #define CSR_DISPLAY0BB 4
222 #define CSR_DISPLAY0C 5
223 #define CSR_DISPLAY0CB 6
224 #define CSR_AFIR 14
225 #define CSR_AVPCARM7R 15
226 #define CSR_DISPLAYHC 16
227 #define CSR_DISPLAYHCB 17
228 #define CSR_HDAR 21
229 #define CSR_HOST1XDMAR 22
230 #define CSR_HOST1XR 23
231 #define CSR_MSENCSRD 28
232 #define CSR_PPCSAHBDMAR 29
233 #define CSR_PPCSAHBSLVR 30
234 #define CSR_SATAR 31
235 #define CSR_VDEBSEVR 34
236 #define CSR_VDEMBER 35
237 #define CSR_VDEMCER 36
238 #define CSR_VDETPER 37
239 #define CSR_MPCORELPR 38
240 #define CSR_MPCORER 39
241 #define CSW_MSENCSWR 43
242 #define CSW_AFIW 49
243 #define CSW_AVPCARM7W 50
244 #define CSW_HDAW 53
245 #define CSW_HOST1XW 54
246 #define CSW_MPCORELPW 56
247 #define CSW_MPCOREW 57
248 #define CSW_PPCSAHBDMAW 59
249 #define CSW_PPCSAHBSLVW 60
250 #define CSW_SATAW 61
251 #define CSW_VDEBSEVW 62
252 #define CSW_VDEDBGW 63
253 #define CSW_VDEMBEW 64
254 #define CSW_VDETPMW 65
255 #define CSR_ISPRA 68
256 #define CSW_ISPWA 70
257 #define CSW_ISPWB 71
258 #define CSR_XUSB_HOSTR 74
259 #define CSW_XUSB_HOSTW 75
260 #define CSR_XUSB_DEVR 76
261 #define CSW_XUSB_DEVW 77
262 #define CSR_ISPRAB 78
263 #define CSW_ISPWAB 80
264 #define CSW_ISPWBB 81
265 #define CSR_TSECSRD 84
266 #define CSW_TSECSWR 85
267 #define CSR_A9AVPSCR 86
268 #define CSW_A9AVPSCW 87
269 #define CSR_GPUSRD 88
270 #define CSW_GPUSWR 89
271 #define CSR_DISPLAYT 90
272 #define CSR_SDMMCRA 96
273 #define CSR_SDMMCRAA 97
274 #define CSR_SDMMCR 98
275 #define CSR_SDMMCRAB 99
276 #define CSW_SDMMCWA 100
277 #define CSW_SDMMCWAA 101
278 #define CSW_SDMMCW 102
279 #define CSW_SDMMCWAB 103
280 #define CSR_VICSRD 108
281 #define CSW_VICSWR 109
282 #define CSW_VIW 114
283 #define CSR_DISPLAYD 115
284
285 #define SMMU_CLIENT_CONF0       0x40
286
287 #define smmu_client_enable_hwgrp(c, m)  smmu_client_set_hwgrp(c, m, 1)
288 #define smmu_client_disable_hwgrp(c)    smmu_client_set_hwgrp(c, 0, 0)
289 #define __smmu_client_enable_hwgrp(c, m) __smmu_client_set_hwgrp(c, m, 1)
290 #define __smmu_client_disable_hwgrp(c)  __smmu_client_set_hwgrp(c, 0, 0)
291
292 static struct device *save_smmu_device;
293
294 static size_t smmu_flush_all_th_pages = SZ_512; /* flush the whole AS beyond this many pages */
295
296 static const u32 smmu_asid_security_ofs[] = {
297         SMMU_ASID_SECURITY,
298         SMMU_ASID_SECURITY_1,
299         SMMU_ASID_SECURITY_2,
300         SMMU_ASID_SECURITY_3,
301         SMMU_ASID_SECURITY_4,
302         SMMU_ASID_SECURITY_5,
303         SMMU_ASID_SECURITY_6,
304         SMMU_ASID_SECURITY_7,
305 };
306
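/* Translate a SWGID into the offset of its ASID assignment register */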
307 static size_t tegra_smmu_get_offset(int id)
308 {
309         switch (id) {
310         case SWGID_DC14:
311                 return 0x490;
312         case SWGID_DC12:
313                 return 0xa88;
314         case SWGID_AFI...SWGID_ISP:
315         case SWGID_MPE...SWGID_PPCS1:
316                 return (id - SWGID_AFI) * sizeof(u32) + SMMU_AFI_ASID;
317         case SWGID_SDMMC1A...63:
318                 return (id - SWGID_SDMMC1A) * sizeof(u32) + 0xa94;
319         }
320
321         BUG();
322 }
323
324 /*
325  * Per client for address space
326  */
327 struct smmu_client {
328         struct device           *dev;
329         struct list_head        list;
330         struct smmu_as          *as;
331         u64                     swgids;
332 };
333
334 /*
335  * Per address space
336  */
337 struct smmu_as {
338         struct smmu_device      *smmu;  /* back pointer to container */
339         unsigned int            asid;
340         spinlock_t              lock;   /* for pagetable */
341         struct page             *pdir_page;
342         u32                     pdir_attr;
343         u32                     pde_attr;
344         u32                     pte_attr;
345         unsigned int            *pte_count;
346
347         struct list_head        client;
348         spinlock_t              client_lock; /* for client list */
349 };
350
351 struct smmu_debugfs_info {
352         struct smmu_device *smmu;
353         int mc;
354         int cache;
355 };
356
357 /*
358  * Per SMMU device - IOMMU device
359  */
360 struct smmu_device {
361         void __iomem    *regs, *regs_ahbarb;
362         unsigned long   iovmm_base;     /* remappable base address */
363         unsigned long   page_count;     /* total remappable size */
364         spinlock_t      lock;
365         char            *name;
366         struct device   *dev;
367         u64             swgids;         /* memory client ID bitmap */
368         size_t          ptc_cache_size;
369         struct page *avp_vector_page;   /* dummy page shared by all AS's */
370
371         /*
372          * Register image savers for suspend/resume
373          */
374         int num_translation_enable;
375         u32 translation_enable[4];
376         int num_asid_security;
377         u32 asid_security[8];
378
379         struct dentry *debugfs_root;
380         struct smmu_debugfs_info *debugfs_info;
381
382         int             num_as;
383         struct smmu_as  as[0];          /* Run-time allocated array */
384 };
385
386 static struct smmu_device *smmu_handle; /* unique for a system */
387
388 /*
389  *      SMMU/AHB register accessors
390  */
391 static inline u32 smmu_read(struct smmu_device *smmu, size_t offs)
392 {
393         return readl(smmu->regs + offs);
394 }
395 static inline void smmu_write(struct smmu_device *smmu, u32 val, size_t offs)
396 {
397         writel(val, smmu->regs + offs);
398 }
399
400 static inline u32 ahb_read(struct smmu_device *smmu, size_t offs)
401 {
402         return readl(smmu->regs_ahbarb + offs);
403 }
404 static inline void ahb_write(struct smmu_device *smmu, u32 val, size_t offs)
405 {
406         writel(val, smmu->regs_ahbarb + offs);
407 }
408
409 static void __smmu_client_ordered(struct smmu_device *smmu, int id)
410 {
411         size_t offs;
412         u32 val;
413
414         offs = SMMU_CLIENT_CONF0;
415         offs += (id / BITS_PER_LONG) * sizeof(u32);
416
417         val = smmu_read(smmu, offs);
418         val |= BIT(id % BITS_PER_LONG);
419         smmu_write(smmu, val, offs);
420 }
421
422 static void smmu_client_ordered(struct smmu_device *smmu)
423 {
424         int i, id[] = {
425                 /* Add client ID here to be ordered */
426         };
427
428         for (i = 0; i < ARRAY_SIZE(id); i++)
429                 __smmu_client_ordered(smmu, id[i]);
430 }
431
432 #define VA_PAGE_TO_PA(va, page) \
433         (page_to_phys(page) + ((unsigned long)(va) & ~PAGE_MASK))
434
435 #define VA_PAGE_TO_PA_HI(va, page)      \
436         (u32)((u64)(page_to_phys(page)) >> 32)
437
438 #define FLUSH_CPU_DCACHE(va, page, size)        \
439         do {    \
440                 unsigned long _pa_ = VA_PAGE_TO_PA(va, page);           \
441                 __cpuc_flush_dcache_area((void *)(va), (size_t)(size)); \
442                 outer_flush_range(_pa_, _pa_+(size_t)(size));           \
443         } while (0)
444
445 /*
446  * Any interaction between any block on PPSB and a block on APB or AHB
447  * must have these read-back barriers to ensure the APB/AHB bus
448  * transaction is complete before initiating activity on the PPSB
449  * block.
450  */
451 #define FLUSH_SMMU_REGS(smmu)   smmu_read(smmu, SMMU_CONFIG)
452
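/* Collect the SWGID bitmap from the "nvidia,memory-clients" DT property */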
453 static u64 tegra_smmu_of_get_swgids(struct device *dev)
454 {
455         size_t bytes;
456         const char *propname = "nvidia,memory-clients";
457         const __be32 *prop;
458         int i;
459         u64 swgids = 0;
460
461         prop = of_get_property(dev->of_node, propname, &bytes);
462         if (!prop || !bytes)
463                 return 0;
464
465         for (i = 0; i < bytes / sizeof(u32); i++, prop++)
466                 swgids |= 1ULL << be32_to_cpup(prop);
467
468         return swgids;
469 }
470
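/*
 * Unlocked helper for smmu_client_set_hwgrp(): point every SWGID set in
 * @map at this client's ASID (on), or drop the assignments when the
 * last client detaches (off).
 */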
471 static int __smmu_client_set_hwgrp(struct smmu_client *c, u64 map, int on)
472 {
473         int i;
474         struct smmu_as *as = c->as;
475         u32 val, offs, mask = SMMU_ASID_ENABLE(as->asid);
476         struct smmu_device *smmu = as->smmu;
477
478         WARN_ON(!on && map);
479         if (on && !map)
480                 return -EINVAL;
481         if (!on)
482                 map = c->swgids;
483
484         for_each_set_bit(i, (unsigned long *)&map, HWGRP_COUNT) {
485
486                 /* FIXME: PCIe client hasn't been registered as IOMMU */
487                 if (i == SWGID_AFI)
488                         continue;
489
490                 offs = tegra_smmu_get_offset(i);
491                 val = smmu_read(smmu, offs);
492                 val &= ~SMMU_ASID_MASK; /* always overwrite ASID */
493
494                 if (on)
495                         val |= mask;
496                 else if (list_empty(&c->list))
497                         val = 0; /* turn off if this is the last */
498                 else
499                         return 0; /* leave if off but not the last */
500
501                 smmu_write(smmu, val, offs);
502
503                 dev_dbg(c->dev, "swgid:%d asid:%d %s @%s\n",
504                         i, val & SMMU_ASID_MASK,
505                          (val & BIT(31)) ? "Enabled" : "Disabled", __func__);
506         }
507         FLUSH_SMMU_REGS(smmu);
508         c->swgids = map;
509         return 0;
510
511 }
512
513 static int smmu_client_set_hwgrp(struct smmu_client *c, u64 map, int on)
514 {
515         u32 val;
516         unsigned long flags;
517         struct smmu_as *as = c->as;
518         struct smmu_device *smmu = as->smmu;
519
520         spin_lock_irqsave(&smmu->lock, flags);
521         val = __smmu_client_set_hwgrp(c, map, on);
522         spin_unlock_irqrestore(&smmu->lock, flags);
523         return val;
524 }
525
526 /*
527  * Flush all TLB entries and all PTC entries
528  * Caller must lock smmu
529  */
530 static void smmu_flush_regs(struct smmu_device *smmu, int enable)
531 {
532         u32 val;
533
534         smmu_write(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH);
535         FLUSH_SMMU_REGS(smmu);
536         val = SMMU_TLB_FLUSH_VA_MATCH_ALL |
537                 SMMU_TLB_FLUSH_ASID_MATCH_disable;
538         smmu_write(smmu, val, SMMU_TLB_FLUSH);
539
540         if (enable)
541                 smmu_write(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
542         FLUSH_SMMU_REGS(smmu);
543 }
544
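/*
 * (Re)program the SMMU registers: per-ASID page directory pointers and
 * client SWGID assignments, the saved translation-enable and
 * ASID-security images, and the PTC/TLB cache configuration (used when
 * the SMMU is reinitialized, e.g. on resume).
 */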
545 static void smmu_setup_regs(struct smmu_device *smmu)
546 {
547         int i;
548         u32 val;
549
550         for (i = 0; i < smmu->num_as; i++) {
551                 struct smmu_as *as = &smmu->as[i];
552                 struct smmu_client *c;
553
554                 smmu_write(smmu, SMMU_PTB_ASID_CUR(as->asid), SMMU_PTB_ASID);
555                 val = as->pdir_page ?
556                         SMMU_MK_PDIR(as->pdir_page, as->pdir_attr) :
557                         SMMU_PTB_DATA_RESET_VAL;
558                 smmu_write(smmu, val, SMMU_PTB_DATA);
559
560                 list_for_each_entry(c, &as->client, list)
561                         __smmu_client_set_hwgrp(c, c->swgids, 1);
562         }
563
564         for (i = 0; i < smmu->num_translation_enable; i++)
565                 smmu_write(smmu, smmu->translation_enable[i],
566                            SMMU_TRANSLATION_ENABLE_0 + i * sizeof(u32));
567
568         for (i = 0; i < smmu->num_asid_security; i++)
569                 smmu_write(smmu,
570                            smmu->asid_security[i], smmu_asid_security_ofs[i]);
571
572         val = SMMU_PTC_CONFIG_RESET_VAL;
573         if (IS_ENABLED(CONFIG_ARCH_TEGRA_12x_SOC) &&
574             (tegra_get_chipid() == TEGRA_CHIPID_TEGRA12))
575                 val |= SMMU_PTC_REQ_LIMIT;
576
577         smmu_write(smmu, val, SMMU_CACHE_CONFIG(_PTC));
578
579         val = SMMU_TLB_CONFIG_RESET_VAL;
580         if (IS_ENABLED(CONFIG_ARCH_TEGRA_12x_SOC) &&
581             (tegra_get_chipid() == TEGRA_CHIPID_TEGRA12)) {
582                 val |= SMMU_TLB_RR_ARB;
583                 val |= SMMU_TLB_CONFIG_ACTIVE_LINES__VALUE << 1;
584         } else {
585                 val |= SMMU_TLB_CONFIG_ACTIVE_LINES__VALUE;
586         }
587
588         smmu_write(smmu, val, SMMU_CACHE_CONFIG(_TLB));
589
590         if (IS_ENABLED(CONFIG_ARCH_TEGRA_12x_SOC) &&
591             (tegra_get_chipid() == TEGRA_CHIPID_TEGRA12))
592                 smmu_client_ordered(smmu);
593
594         smmu_flush_regs(smmu, 1);
595
596         if (tegra_get_chipid() == TEGRA_CHIPID_TEGRA3
597                         || tegra_get_chipid() == TEGRA_CHIPID_TEGRA11
598                         || tegra_get_chipid() == TEGRA_CHIPID_TEGRA14) {
599                 val = ahb_read(smmu, AHB_XBAR_CTRL);
600                 val |= AHB_XBAR_CTRL_SMMU_INIT_DONE_DONE <<
601                         AHB_XBAR_CTRL_SMMU_INIT_DONE_SHIFT;
602                 ahb_write(smmu, val, AHB_XBAR_CTRL);
603         }
604 }
605
606
607 static void __smmu_flush_ptc(struct smmu_device *smmu, u32 *pte,
608                              struct page *page)
609 {
610         u32 val;
611
612         if (!pte) {
613                 smmu_write(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH);
614                 return;
615         }
616
617         if (IS_ENABLED(CONFIG_ARCH_TEGRA_12x_SOC) &&
618                 (tegra_get_chipid() == TEGRA_CHIPID_TEGRA12)) {
619                 val = VA_PAGE_TO_PA_HI(pte, page);
620                 smmu_write(smmu, val, SMMU_PTC_FLUSH_1);
621         }
622
623         val = SMMU_PTC_FLUSH_TYPE_ADR | VA_PAGE_TO_PA(pte, page);
624         smmu_write(smmu, val, SMMU_PTC_FLUSH);
625 }
626
627 static void smmu_flush_ptc(struct smmu_device *smmu, u32 *pte,
628                            struct page *page)
629 {
630         __smmu_flush_ptc(smmu, pte, page);
631         FLUSH_SMMU_REGS(smmu);
632 }
633
634 static inline void __smmu_flush_ptc_all(struct smmu_device *smmu)
635 {
636         __smmu_flush_ptc(smmu, 0, NULL);
637 }
638
639 static void __smmu_flush_tlb(struct smmu_device *smmu, struct smmu_as *as,
640                            dma_addr_t iova, int is_pde)
641 {
642         u32 val;
643
644         if (is_pde)
645                 val = SMMU_TLB_FLUSH_VA(iova, SECTION);
646         else
647                 val = SMMU_TLB_FLUSH_VA(iova, GROUP);
648
649         smmu_write(smmu, val, SMMU_TLB_FLUSH);
650 }
651
652 static inline void __smmu_flush_tlb_section(struct smmu_as *as, dma_addr_t iova)
653 {
654         __smmu_flush_tlb(as->smmu, as, iova, 1);
655 }
656
657 static void flush_ptc_and_tlb(struct smmu_device *smmu,
658                               struct smmu_as *as, dma_addr_t iova,
659                               u32 *pte, struct page *page, int is_pde)
660 {
661         __smmu_flush_ptc(smmu, pte, page);
662         __smmu_flush_tlb(smmu, as, iova, is_pde);
663         FLUSH_SMMU_REGS(smmu);
664 }
665
666 #ifdef CONFIG_TEGRA_ERRATA_1053704
667 /* Flush PTEs within the same L2 pagetable */
668 static void ____smmu_flush_tlb_range(struct smmu_device *smmu, dma_addr_t iova,
669                                    dma_addr_t end)
670 {
671         size_t unit = SZ_16K;
672
673         iova = round_down(iova, unit);
674         while (iova < end) {
675                 u32 val;
676
677                 val = SMMU_TLB_FLUSH_VA(iova, GROUP);
678                 smmu_write(smmu, val, SMMU_TLB_FLUSH);
679                 iova += unit;
680         }
681 }
682 #endif
683
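/*
 * Flush PTC and TLB for @count PTEs starting at @iova: one PTC flush
 * per PTC-cache-sized chunk of PTEs, and TLB flushes in 16KB groups.
 */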
684 static void flush_ptc_and_tlb_range(struct smmu_device *smmu,
685                                     struct smmu_as *as, dma_addr_t iova,
686                                     u32 *pte, struct page *page,
687                                     size_t count)
688 {
689         size_t unit = SZ_16K;
690         dma_addr_t end = iova + count * PAGE_SIZE;
691
692         iova = round_down(iova, unit);
693         while (iova < end) {
694                 int i;
695
696                 __smmu_flush_ptc(smmu, pte, page);
697                 pte += smmu->ptc_cache_size / PAGE_SIZE;
698
699                 for (i = 0; i < smmu->ptc_cache_size / unit; i++) {
700                         u32 val;
701
702                         val = SMMU_TLB_FLUSH_VA(iova, GROUP);
703                         smmu_write(smmu, val, SMMU_TLB_FLUSH);
704                         iova += unit;
705                 }
706         }
707
708         FLUSH_SMMU_REGS(smmu);
709 }
710
711 static inline void flush_ptc_and_tlb_all(struct smmu_device *smmu,
712                                          struct smmu_as *as)
713 {
714         flush_ptc_and_tlb(smmu, as, 0, 0, NULL, 1);
715 }
716
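/*
 * Release the L2 page table covering @iova and mark its PDE vacant;
 * flush PTC/TLB for the PDE unless the caller does a full flush itself.
 */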
717 static void free_ptbl(struct smmu_as *as, dma_addr_t iova, bool flush)
718 {
719         int pdn = SMMU_ADDR_TO_PDN(iova);
720         u32 *pdir = (u32 *)page_address(as->pdir_page);
721
722         if (pdir[pdn] != _PDE_VACANT(pdn)) {
723                 dev_dbg(as->smmu->dev, "pdn: %x\n", pdn);
724
725                 ClearPageReserved(SMMU_EX_PTBL_PAGE(pdir[pdn]));
726                 __free_page(SMMU_EX_PTBL_PAGE(pdir[pdn]));
727                 pdir[pdn] = _PDE_VACANT(pdn);
728                 FLUSH_CPU_DCACHE(&pdir[pdn], as->pdir_page, sizeof pdir[pdn]);
729                 if (!flush)
730                         return;
731
732                 flush_ptc_and_tlb(as->smmu, as, iova, &pdir[pdn],
733                                   as->pdir_page, 1);
734         }
735 }
736
737 #ifdef CONFIG_TEGRA_ERRATA_1053704
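/*
 * Errata 1053704 workaround: instead of a single ASID-match flush, walk
 * the page directory and flush the mapped ranges by VA.
 */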
738 static void __smmu_flush_tlb_range(struct smmu_as *as, dma_addr_t iova,
739                                  dma_addr_t end)
740 {
741         u32 *pdir;
742         struct smmu_device *smmu = as->smmu;
743
744         if (!pfn_valid(page_to_pfn(as->pdir_page)))
745                 return;
746
747         pdir = page_address(as->pdir_page);
748         while (iova < end) {
749                 int pdn = SMMU_ADDR_TO_PDN(iova);
750
751                 if (pdir[pdn] & _PDE_NEXT) {
752                         struct page *page = SMMU_EX_PTBL_PAGE(pdir[pdn]);
753                         dma_addr_t _end = min_t(dma_addr_t, end,
754                                                 SMMU_PDN_TO_ADDR(pdn + 1));
755
756                         if (pfn_valid(page_to_pfn(page)))
757                                 ____smmu_flush_tlb_range(smmu, iova, _end);
758
759                         iova = _end;
760                 } else {
761                         if (pdir[pdn])
762                                 __smmu_flush_tlb_section(as, iova);
763
764                         iova = SMMU_PDN_TO_ADDR(pdn + 1);
765                 }
766
767                 if (pdn == SMMU_PDIR_COUNT - 1)
768                         break;
769         }
770 }
771
772 static void __smmu_flush_tlb_as(struct smmu_as *as, dma_addr_t iova,
773                               dma_addr_t end)
774 {
775         __smmu_flush_tlb_range(as, iova, end);
776 }
777 #else
778 static void __smmu_flush_tlb_as(struct smmu_as *as, dma_addr_t iova,
779                               dma_addr_t end)
780 {
781         u32 val;
782         struct smmu_device *smmu = as->smmu;
783
784         val = SMMU_TLB_FLUSH_ASID_ENABLE |
785                 (as->asid << SMMU_TLB_FLUSH_ASID_SHIFT(as));
786         smmu_write(smmu, val, SMMU_TLB_FLUSH);
787 }
788 #endif
789
790 static void flush_ptc_and_tlb_as(struct smmu_as *as, dma_addr_t start,
791                                  dma_addr_t end)
792 {
793         struct smmu_device *smmu = as->smmu;
794
795         __smmu_flush_ptc_all(smmu);
796         __smmu_flush_tlb_as(as, start, end);
797         FLUSH_SMMU_REGS(smmu);
798 }
799
800 static void free_pdir(struct smmu_as *as)
801 {
802         unsigned long addr;
803         int count;
804         struct device *dev = as->smmu->dev;
805
806         if (!as->pdir_page)
807                 return;
808
809         addr = as->smmu->iovmm_base;
810         count = as->smmu->page_count;
811         while (count-- > 0) {
812                 free_ptbl(as, addr, 1);
813                 addr += SMMU_PAGE_SIZE * SMMU_PTBL_COUNT;
814         }
815         ClearPageReserved(as->pdir_page);
816         __free_page(as->pdir_page);
817         as->pdir_page = NULL;
818         devm_kfree(dev, as->pte_count);
819         as->pte_count = NULL;
820 }
821
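/*
 * Allocate and install a new L2 page table for the 4MB region that
 * contains @iova; flush PTC/TLB for the new PDE when @flush is set.
 * Callers hold as->lock.
 */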
822 static struct page *alloc_ptbl(struct smmu_as *as, dma_addr_t iova, bool flush)
823 {
824         int i;
825         u32 *pdir = page_address(as->pdir_page);
826         int pdn = SMMU_ADDR_TO_PDN(iova);
827         unsigned long addr = SMMU_PDN_TO_ADDR(pdn);
828         struct page *page;
829         u32 *ptbl;
830         gfp_t gfp = GFP_ATOMIC;
831
832         if (IS_ENABLED(CONFIG_PREEMPT) && !in_atomic())
833                 gfp = GFP_KERNEL;
834
835         if (!IS_ENABLED(CONFIG_TEGRA_IOMMU_SMMU_LINEAR))
836                 gfp |= __GFP_ZERO;
837
838         /* Vacant - allocate a new page table */
839         dev_dbg(as->smmu->dev, "New PTBL pdn: %x\n", pdn);
840
841         page = alloc_page(gfp);
842         if (!page)
843                 return NULL;
844
845         SetPageReserved(page);
846         ptbl = (u32 *)page_address(page);
847         if (IS_ENABLED(CONFIG_TEGRA_IOMMU_SMMU_LINEAR)) {
848                 for (i = 0; i < SMMU_PTBL_COUNT; i++) {
849                         ptbl[i] = _PTE_VACANT(addr);
850                         addr += SMMU_PAGE_SIZE;
851                 }
852         }
853
854         FLUSH_CPU_DCACHE(ptbl, page, SMMU_PTBL_SIZE);
855         pdir[pdn] = SMMU_MK_PDE(page, as->pde_attr | _PDE_NEXT);
856         FLUSH_CPU_DCACHE(&pdir[pdn], as->pdir_page, sizeof pdir[pdn]);
857         if (flush)
858                 flush_ptc_and_tlb(as->smmu, as, iova, &pdir[pdn],
859                                   as->pdir_page, 1);
860         return page;
861 }
862
863 /*
864  * Maps PTBL for given iova and returns the PTE address
865  * Caller must unmap the mapped PTBL returned in *ptbl_page_p
866  */
867 static u32 *locate_pte(struct smmu_as *as,
868                                  dma_addr_t iova, bool allocate,
869                                  struct page **ptbl_page_p,
870                                  unsigned int **count)
871 {
872         int ptn = SMMU_ADDR_TO_PTN(iova);
873         int pdn = SMMU_ADDR_TO_PDN(iova);
874         u32 *pdir = page_address(as->pdir_page);
875         u32 *ptbl;
876
877         if (pdir[pdn] != _PDE_VACANT(pdn)) {
878                 /* Mapped entry table already exists */
879                 *ptbl_page_p = SMMU_EX_PTBL_PAGE(pdir[pdn]);
880         } else if (!allocate) {
881                 return NULL;
882         } else {
883                 *ptbl_page_p = alloc_ptbl(as, iova, 1);
884                 if (!*ptbl_page_p)
885                         return NULL;
886         }
887
888         ptbl = page_address(*ptbl_page_p);
889         *count = &as->pte_count[pdn];
890         return &ptbl[ptn];
891 }
892
893 #ifdef CONFIG_SMMU_SIG_DEBUG
894 static void put_signature(struct smmu_as *as,
895                           dma_addr_t iova, unsigned long pfn)
896 {
897         struct page *page;
898         u32 *vaddr;
899
900         page = pfn_to_page(pfn);
901         vaddr = page_address(page);
902         if (!vaddr)
903                 return;
904
905         vaddr[0] = iova;
906         vaddr[1] = pfn << PAGE_SHIFT;
907         FLUSH_CPU_DCACHE(vaddr, page, sizeof(vaddr[0]) * 2);
908 }
909 #else
910 static inline void put_signature(struct smmu_as *as,
911                                  unsigned long addr, unsigned long pfn)
912 {
913 }
914 #endif
915
916 /*
917  * Caller must not hold as->lock
918  */
919 static int alloc_pdir(struct smmu_as *as)
920 {
921         u32 *pdir;
922         unsigned long flags;
923         int pdn, err = 0;
924         u32 val;
925         struct smmu_device *smmu = as->smmu;
926         struct page *page;
927         unsigned int *cnt;
928
929         /*
930          * do the allocation, then grab as->lock
931          */
932         cnt = devm_kzalloc(smmu->dev,
933                            sizeof(cnt[0]) * SMMU_PDIR_COUNT,
934                            GFP_KERNEL);
935         page = alloc_page(GFP_KERNEL | __GFP_DMA);
936
937         spin_lock_irqsave(&as->lock, flags);
938
939         if (as->pdir_page) {
940                 /* We raced, free the redundant */
941                 err = -EAGAIN;
942                 goto err_out;
943         }
944
945         if (!page || !cnt) {
946                 dev_err(smmu->dev, "failed to allocate at %s\n", __func__);
947                 err = -ENOMEM;
948                 goto err_out;
949         }
950
951         as->pdir_page = page;
952         as->pte_count = cnt;
953
954         SetPageReserved(as->pdir_page);
955         pdir = page_address(as->pdir_page);
956
957         for (pdn = 0; pdn < SMMU_PDIR_COUNT; pdn++)
958                 pdir[pdn] = _PDE_VACANT(pdn);
959         FLUSH_CPU_DCACHE(pdir, as->pdir_page, SMMU_PDIR_SIZE);
960         smmu_flush_ptc(smmu, pdir, as->pdir_page);
961         val = SMMU_TLB_FLUSH_VA_MATCH_ALL |
962                 SMMU_TLB_FLUSH_ASID_MATCH__ENABLE |
963                 (as->asid << SMMU_TLB_FLUSH_ASID_SHIFT(as));
964         smmu_write(smmu, val, SMMU_TLB_FLUSH);
965         FLUSH_SMMU_REGS(as->smmu);
966
967         spin_unlock_irqrestore(&as->lock, flags);
968
969         return 0;
970
971 err_out:
972         spin_unlock_irqrestore(&as->lock, flags);
973
974         if (page)
975                 __free_page(page);
976         if (cnt)
977                 devm_kfree(smmu->dev, cnt);
978         return err;
979 }
980
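/*
 * Unmap @bytes of 4KB mappings starting at @iova, freeing L2 tables
 * that become empty; beyond smmu_flush_all_th_pages the per-range
 * flushes are replaced by a single flush of the whole AS.
 */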
981 static size_t __smmu_iommu_unmap_pages(struct smmu_as *as, dma_addr_t iova,
982                                        size_t bytes)
983 {
984         int total = bytes >> PAGE_SHIFT;
985         u32 *pdir = page_address(as->pdir_page);
986         struct smmu_device *smmu = as->smmu;
987         unsigned long iova_base = iova;
988         bool flush_all = (total > smmu_flush_all_th_pages) ? true : false;
989
990         while (total > 0) {
991                 int ptn = SMMU_ADDR_TO_PTN(iova);
992                 int pdn = SMMU_ADDR_TO_PDN(iova);
993                 struct page *page = SMMU_EX_PTBL_PAGE(pdir[pdn]);
994                 u32 *ptbl;
995                 u32 *pte;
996                 int count;
997
998                 if (!pfn_valid(page_to_pfn(page))) {
999                         total -= (SMMU_PDN_TO_ADDR(pdn + 1) - iova) >> PAGE_SHIFT;
1000                         iova = SMMU_PDN_TO_ADDR(pdn + 1);
1001                         continue;
1002                 }
1003
1004                 ptbl = page_address(page);
1005                 pte = &ptbl[ptn];
1006                 count = min_t(int, SMMU_PTBL_COUNT - ptn, total);
1007
1008                 dev_dbg(as->smmu->dev, "unmapping %d pages at once\n", count);
1009
1010                 if (pte) {
1011                         unsigned int *rest = &as->pte_count[pdn];
1012                         size_t bytes = sizeof(*pte) * count;
1013
1014                         memset(pte, 0, bytes);
1015                         FLUSH_CPU_DCACHE(pte, page, bytes);
1016
1017                         *rest -= count;
1018                         if (!*rest)
1019                                 free_ptbl(as, iova, !flush_all);
1020
1021                         if (!flush_all)
1022                                 flush_ptc_and_tlb_range(smmu, as, iova, pte,
1023                                                         page, count);
1024                 }
1025
1026                 iova += PAGE_SIZE * count;
1027                 total -= count;
1028         }
1029
1030         if (flush_all)
1031                 flush_ptc_and_tlb_as(as, iova_base,
1032                                      iova_base + bytes);
1033
1034         return bytes;
1035 }
1036
1037 static size_t __smmu_iommu_unmap_largepage(struct smmu_as *as, dma_addr_t iova)
1038 {
1039         int pdn = SMMU_ADDR_TO_PDN(iova);
1040         u32 *pdir = (u32 *)page_address(as->pdir_page);
1041
1042         pdir[pdn] = _PDE_VACANT(pdn);
1043         FLUSH_CPU_DCACHE(&pdir[pdn], as->pdir_page, sizeof pdir[pdn]);
1044         flush_ptc_and_tlb(as->smmu, as, iova, &pdir[pdn], as->pdir_page, 1);
1045         return SZ_4M;
1046 }
1047
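/* Install a single 4KB PTE mapping @pfn at @iova and flush PTC/TLB */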
1048 static int __smmu_iommu_map_pfn(struct smmu_as *as, dma_addr_t iova,
1049                                 unsigned long pfn, int prot)
1050 {
1051         struct smmu_device *smmu = as->smmu;
1052         u32 *pte;
1053         unsigned int *count;
1054         struct page *page;
1055         int attrs = as->pte_attr;
1056
1057         pte = locate_pte(as, iova, true, &page, &count);
1058         if (WARN_ON(!pte))
1059                 return -ENOMEM;
1060
1061         if (*pte == _PTE_VACANT(iova))
1062                 (*count)++;
1063
1064         if (dma_get_attr(DMA_ATTR_READ_ONLY, (struct dma_attrs *)prot))
1065                 attrs &= ~_WRITABLE;
1066         else if (dma_get_attr(DMA_ATTR_WRITE_ONLY, (struct dma_attrs *)prot))
1067                 attrs &= ~_READABLE;
1068
1069         *pte = SMMU_PFN_TO_PTE(pfn, attrs);
1070         FLUSH_CPU_DCACHE(pte, page, sizeof(*pte));
1071         flush_ptc_and_tlb(smmu, as, iova, pte, page, 0);
1072         put_signature(as, iova, pfn);
1073         return 0;
1074 }
1075
1076 static int __smmu_iommu_map_page(struct smmu_as *as, dma_addr_t iova,
1077                                  phys_addr_t pa, int prot)
1078 {
1079         unsigned long pfn = __phys_to_pfn(pa);
1080
1081         return __smmu_iommu_map_pfn(as, iova, pfn, prot);
1082 }
1083
1084 static int __smmu_iommu_map_largepage(struct smmu_as *as, dma_addr_t iova,
1085                                  phys_addr_t pa, int prot)
1086 {
1087         int pdn = SMMU_ADDR_TO_PDN(iova);
1088         u32 *pdir = (u32 *)page_address(as->pdir_page);
1089         int attrs = _PDE_ATTR;
1090
1091         if (pdir[pdn] != _PDE_VACANT(pdn))
1092                 return -EINVAL;
1093
1094         if (dma_get_attr(DMA_ATTR_READ_ONLY, (struct dma_attrs *)prot))
1095                 attrs &= ~_WRITABLE;
1096         else if (dma_get_attr(DMA_ATTR_WRITE_ONLY, (struct dma_attrs *)prot))
1097                 attrs &= ~_READABLE;
1098
1099         pdir[pdn] = SMMU_ADDR_TO_PDN(pa) << 10 | attrs;
1100         FLUSH_CPU_DCACHE(&pdir[pdn], as->pdir_page, sizeof pdir[pdn]);
1101         flush_ptc_and_tlb(as->smmu, as, iova, &pdir[pdn], as->pdir_page, 1);
1102
1103         return 0;
1104 }
1105
1106 static int smmu_iommu_map(struct iommu_domain *domain, unsigned long iova,
1107                           phys_addr_t pa, size_t bytes, int prot)
1108 {
1109         struct smmu_as *as = domain->priv;
1110         unsigned long flags;
1111         int err;
1112         int (*fn)(struct smmu_as *as, dma_addr_t iova, phys_addr_t pa,
1113                   int prot);
1114
1115         dev_dbg(as->smmu->dev, "[%d] %08lx:%pa\n", as->asid, iova, &pa);
1116
1117         switch (bytes) {
1118         case SZ_4K:
1119                 fn = __smmu_iommu_map_page;
1120                 break;
1121         case SZ_4M:
1122                 fn = __smmu_iommu_map_largepage;
1123                 break;
1124         default:
1125                 WARN(1, "%zu not supported\n", bytes);
1126                 return -EINVAL;
1127         }
1128
1129         spin_lock_irqsave(&as->lock, flags);
1130         err = fn(as, iova, pa, prot);
1131         spin_unlock_irqrestore(&as->lock, flags);
1132         return err;
1133 }
1134
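/*
 * Map an array of pages as 4KB PTEs starting at @iova, batching PTC/TLB
 * maintenance per L2 table (or deferring to one flush of the whole AS
 * for requests above smmu_flush_all_th_pages).
 */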
1135 static int smmu_iommu_map_pages(struct iommu_domain *domain, unsigned long iova,
1136                                 struct page **pages, size_t total, int prot)
1137 {
1138         struct smmu_as *as = domain->priv;
1139         struct smmu_device *smmu = as->smmu;
1140         u32 *pdir = page_address(as->pdir_page);
1141         int err = 0;
1142         unsigned long iova_base = iova;
1143         bool flush_all = (total > smmu_flush_all_th_pages) ? true : false;
1144         int attrs = as->pte_attr;
1145
1146         if (dma_get_attr(DMA_ATTR_READ_ONLY, (struct dma_attrs *)prot))
1147                 attrs &= ~_WRITABLE;
1148         else if (dma_get_attr(DMA_ATTR_WRITE_ONLY, (struct dma_attrs *)prot))
1149                 attrs &= ~_READABLE;
1150
1151         while (total > 0) {
1152                 int pdn = SMMU_ADDR_TO_PDN(iova);
1153                 int ptn = SMMU_ADDR_TO_PTN(iova);
1154                 unsigned int *rest = &as->pte_count[pdn];
1155                 int count = min_t(size_t, SMMU_PTBL_COUNT - ptn, total);
1156                 struct page *tbl_page;
1157                 u32 *ptbl;
1158                 u32 *pte;
1159                 int i;
1160                 unsigned long flags;
1161
1162                 spin_lock_irqsave(&as->lock, flags);
1163
1164                 if (pdir[pdn] == _PDE_VACANT(pdn)) {
1165                         tbl_page = alloc_ptbl(as, iova, !flush_all);
1166                         if (!tbl_page) {
1167                                 err = -ENOMEM;
1168                                 spin_unlock_irqrestore(&as->lock, flags);
1169                                 goto out;
1170                         }
1171
1172                 } else {
1173                         tbl_page = SMMU_EX_PTBL_PAGE(pdir[pdn]);
1174                 }
1175
1176                 ptbl = page_address(tbl_page);
1177                 for (i = 0; i < count; i++) {
1178                         pte = &ptbl[ptn + i];
1179
1180                         if (*pte == _PTE_VACANT(iova + i * PAGE_SIZE))
1181                                 (*rest)++;
1182
1183                         *pte = SMMU_PFN_TO_PTE(page_to_pfn(pages[i]), attrs);
1184                 }
1185
1186                 pte = &ptbl[ptn];
1187                 FLUSH_CPU_DCACHE(pte, tbl_page, count * sizeof(*pte));
1188                 if (!flush_all)
1189                         flush_ptc_and_tlb_range(smmu, as, iova, pte, tbl_page,
1190                                                 count);
1191
1192                 iova += PAGE_SIZE * count;
1193                 total -= count;
1194                 pages += count;
1195
1196                 spin_unlock_irqrestore(&as->lock, flags);
1197         }
1198
1199 out:
1200         if (flush_all)
1201                 /* total has been consumed above; iova is the end of the mapped range */
1202                 flush_ptc_and_tlb_as(as, iova_base, iova);
1203         return err;
1204 }
1205
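/*
 * Map a scatterlist as 4KB PTEs starting at @iova; like
 * smmu_iommu_map_pages() except that the pfns are walked out of the
 * scatterlist entries.
 */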
1206 static int smmu_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
1207                              struct scatterlist *sgl, int npages, int prot)
1208 {
1209         int err = 0;
1210         unsigned long iova_base = iova;
1211         bool flush_all = (npages > smmu_flush_all_th_pages) ? true : false;
1212         struct smmu_as *as = domain->priv;
1213         u32 *pdir = page_address(as->pdir_page);
1214         struct smmu_device *smmu = as->smmu;
1215         int attrs = as->pte_attr;
1216         size_t total = npages;
1217         size_t sg_remaining = sgl->length >> PAGE_SHIFT;
1218         unsigned long sg_pfn = page_to_pfn(sg_page(sgl));
1219
1220         if (dma_get_attr(DMA_ATTR_READ_ONLY, (struct dma_attrs *)prot))
1221                 attrs &= ~_WRITABLE;
1222         else if (dma_get_attr(DMA_ATTR_WRITE_ONLY, (struct dma_attrs *)prot))
1223                 attrs &= ~_READABLE;
1224
1225         while (total > 0) {
1226                 int pdn = SMMU_ADDR_TO_PDN(iova);
1227                 int ptn = SMMU_ADDR_TO_PTN(iova);
1228                 unsigned int *rest = &as->pte_count[pdn];
1229                 int count = min_t(size_t, SMMU_PTBL_COUNT - ptn, total);
1230                 struct page *tbl_page;
1231                 u32 *ptbl;
1232                 u32 *pte;
1233                 int i;
1234                 unsigned long flags;
1235
1236                 spin_lock_irqsave(&as->lock, flags);
1237
1238                 if (pdir[pdn] == _PDE_VACANT(pdn)) {
1239                         tbl_page = alloc_ptbl(as, iova, !flush_all);
1240                         if (!tbl_page) {
1241                                 err = -ENOMEM;
1242                                 spin_unlock_irqrestore(&as->lock, flags);
1243                                 break;
1244                         }
1245
1246                 } else {
1247                         tbl_page = SMMU_EX_PTBL_PAGE(pdir[pdn]);
1248                 }
1249
1250                 ptbl = page_address(tbl_page);
1251                 for (i = 0; i < count; i++) {
1252
1253                         pte = &ptbl[ptn + i];
1254                         if (*pte == _PTE_VACANT(iova + i * PAGE_SIZE))
1255                                 (*rest)++;
1256
1257                         *pte = SMMU_PFN_TO_PTE(sg_pfn++, attrs);
1258                         if (--sg_remaining)
1259                                 continue;
1260
1261                         sgl = sg_next(sgl);
1262                         if (sgl) {
1263                                 sg_pfn = page_to_pfn(sg_page(sgl));
1264                                 sg_remaining = sgl->length >> PAGE_SHIFT;
1265                         }
1266                 }
1267
1268                 pte = &ptbl[ptn];
1269                 FLUSH_CPU_DCACHE(pte, tbl_page, count * sizeof(*pte));
1270                 if (!flush_all)
1271                         flush_ptc_and_tlb_range(smmu, as, iova, pte, tbl_page,
1272                                                 count);
1273
1274                 iova += PAGE_SIZE * count;
1275                 total -= count;
1276
1277                 spin_unlock_irqrestore(&as->lock, flags);
1278         }
1279
1280         if (flush_all)
1281                 flush_ptc_and_tlb_as(as, iova_base,
1282                                      iova_base + npages * PAGE_SIZE);
1283
1284         return err;
1285 }
1286
1287 static size_t __smmu_iommu_unmap(struct smmu_as *as, dma_addr_t iova,
1288         size_t bytes)
1289 {
1290         int pdn = SMMU_ADDR_TO_PDN(iova);
1291         u32 *pdir = page_address(as->pdir_page);
1292
1293         if (!(pdir[pdn] & _PDE_NEXT))
1294                 return __smmu_iommu_unmap_largepage(as, iova);
1295
1296         return __smmu_iommu_unmap_pages(as, iova, bytes);
1297 }
1298
1299 static size_t smmu_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
1300                                size_t bytes)
1301 {
1302         struct smmu_as *as = domain->priv;
1303         unsigned long flags;
1304         size_t unmapped;
1305
1306         dev_dbg(as->smmu->dev, "[%d] %08lx\n", as->asid, iova);
1307
1308         spin_lock_irqsave(&as->lock, flags);
1309         unmapped = __smmu_iommu_unmap(as, iova, bytes);
1310         spin_unlock_irqrestore(&as->lock, flags);
1311         return unmapped;
1312 }
1313
1314 static phys_addr_t smmu_iommu_iova_to_phys(struct iommu_domain *domain,
1315                                            dma_addr_t iova)
1316 {
1317         struct smmu_as *as = domain->priv;
1318         unsigned long flags;
1319         int pdn = SMMU_ADDR_TO_PDN(iova);
1320         u32 *pdir = page_address(as->pdir_page);
1321         phys_addr_t pa = 0;
1322
1323         spin_lock_irqsave(&as->lock, flags);
1324
1325         if (pdir[pdn] & _PDE_NEXT) {
1326                 u32 *pte;
1327                 unsigned int *count;
1328                 struct page *page;
1329
1330                 pte = locate_pte(as, iova, false, &page, &count);
1331                 if (pte) {
1332                         unsigned long pfn = *pte & SMMU_PFN_MASK;
1333                         pa = PFN_PHYS(pfn);
1334                 }
1335         } else {
1336                 pa = pdir[pdn] << SMMU_PDE_SHIFT;
1337         }
1338
1339         dev_dbg(as->smmu->dev, "iova:%pa pfn:%pa asid:%d\n",
1340                 &iova, &pa, as->asid);
1341
1342         spin_unlock_irqrestore(&as->lock, flags);
1343         return pa;
1344 }
1345
1346 static int smmu_iommu_domain_has_cap(struct iommu_domain *domain,
1347                                      unsigned long cap)
1348 {
1349         return 0;
1350 }
1351
1352 #if defined(CONFIG_DMA_API_DEBUG) || defined(CONFIG_FTRACE)
1353 char *debug_dma_platformdata(struct device *dev)
1354 {
1355         static char buf[21];
1356         struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
1357         struct smmu_as *as;
1358         int asid = -1;
1359
1360         if (mapping) {
1361                 as = mapping->domain->priv;
1362                 asid = as->asid;
1363         }
1364
1365         sprintf(buf, "%d", asid);
1366         return buf;
1367 }
1368 #endif
1369
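/*
 * Attach @dev to this AS: resolve its SWGIDs from DT (with the platform
 * fixup table as fallback), establish any requested linear IOVA maps,
 * and point the device's SWGIDs at this AS's ASID.
 */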
1370 static int smmu_iommu_attach_dev(struct iommu_domain *domain,
1371                                  struct device *dev)
1372 {
1373         struct smmu_as *as = domain->priv;
1374         struct smmu_device *smmu = as->smmu;
1375         struct smmu_client *client, *c;
1376         struct iommu_linear_map *area = NULL;
1377         u64 map, temp;
1378         int err;
1379
1380         map = tegra_smmu_of_get_swgids(dev);
1381         temp = tegra_smmu_fixup_swgids(dev, &area);
1382
1383         if (!map && !temp)
1384                 return -ENODEV;
1385
1386         if (map && temp && map != temp)
1387                 dev_err(dev, "%llx %llx\n", map, temp);
1388
1389         if (!map)
1390                 map = temp;
1391
1392         while (area && area->size) {
1393                 DEFINE_DMA_ATTRS(attrs);
1394                 size_t size = PAGE_ALIGN(area->size);
1395
1396                 dma_set_attr(DMA_ATTR_SKIP_IOVA_GAP, &attrs);
1397                 dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
1398                 err = dma_map_linear_attrs(dev, area->start, size, 0, &attrs);
1399                 if (err == DMA_ERROR_CODE)
1400                         dev_err(dev, "Failed IOVA linear map %pa(%x)\n",
1401                                 &area->start, size);
1402                 else
1403                         dev_info(dev, "IOVA linear map %pa(%x)\n",
1404                                  &area->start, size);
1405
1406                 area++;
1407         }
1408
1409         map &= smmu->swgids;
1410
1411         client = devm_kzalloc(smmu->dev, sizeof(*client), GFP_KERNEL);
1412         if (!client)
1413                 return -ENOMEM;
1414         client->dev = dev;
1415         client->as = as;
1416
1417         err = smmu_client_enable_hwgrp(client, map);
1418         if (err)
1419                 goto err_hwgrp;
1420
1421         spin_lock(&as->client_lock);
1422         list_for_each_entry(c, &as->client, list) {
1423                 if (c->dev == dev) {
1424                         dev_err(smmu->dev,
1425                                 "%s is already attached\n", dev_name(c->dev));
1426                         err = -EINVAL;
1427                         goto err_client;
1428                 }
1429         }
1430         list_add(&client->list, &as->client);
1431         spin_unlock(&as->client_lock);
1432
1433         /*
1434          * Reserve "page zero" for AVP vectors using a common dummy
1435          * page.
1436          */
1437         if (map & SWGID(AVPC)) {
1438                 struct page *page;
1439
1440                 page = as->smmu->avp_vector_page;
1441                 __smmu_iommu_map_pfn(as, 0, page_to_pfn(page), 0);
1442
1443                 pr_debug("Reserve \"page zero\" for AVP vectors "
1444                          "using a common dummy page\n");
1445         }
1446
1447         dev_dbg(smmu->dev, "%s is attached\n", dev_name(dev));
1448         return 0;
1449
1450 err_client:
1451         smmu_client_disable_hwgrp(client);
1452         spin_unlock(&as->client_lock);
1453 err_hwgrp:
1454         devm_kfree(smmu->dev, client);
1455         return err;
1456 }
1457
1458 static void smmu_iommu_detach_dev(struct iommu_domain *domain,
1459                                   struct device *dev)
1460 {
1461         struct smmu_as *as = domain->priv;
1462         struct smmu_device *smmu = as->smmu;
1463         struct smmu_client *c;
1464
1465         spin_lock(&as->client_lock);
1466
1467         list_for_each_entry(c, &as->client, list) {
1468                 if (c->dev == dev) {
1469                         list_del(&c->list);
1470                         smmu_client_disable_hwgrp(c);
1471                         dev_dbg(smmu->dev,
1472                                 "%s is detached\n", dev_name(c->dev));
1473                         devm_kfree(smmu->dev, c);
1474                         goto out;
1475                 }
1476         }
1477         dev_err(smmu->dev, "Couldn't find %s\n", dev_name(dev));
1478 out:
1479         spin_unlock(&as->client_lock);
1480 }
1481
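/* Claim a free AS, allocate its page directory and program SMMU_PTB_* */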
1482 static int smmu_iommu_domain_init(struct iommu_domain *domain)
1483 {
1484         int i, err = -EAGAIN;
1485         unsigned long flags;
1486         struct smmu_as *as;
1487         struct smmu_device *smmu = smmu_handle;
1488
1489         /* Look for a free AS with lock held */
1490         for  (i = 0; i < smmu->num_as; i++) {
1491                 as = &smmu->as[i];
1492
1493                 if (as->pdir_page)
1494                         continue;
1495
1496                 err = alloc_pdir(as);
1497                 if (!err)
1498                         goto found;
1499
1500                 if (err != -EAGAIN)
1501                         break;
1502         }
1503         if (i == smmu->num_as)
1504                 dev_err(smmu->dev,  "no free AS\n");
1505         return err;
1506
1507 found:
1508         spin_lock_irqsave(&smmu->lock, flags);
1509
1510         /* Update PDIR register */
1511         smmu_write(smmu, SMMU_PTB_ASID_CUR(as->asid), SMMU_PTB_ASID);
1512         smmu_write(smmu,
1513                    SMMU_MK_PDIR(as->pdir_page, as->pdir_attr), SMMU_PTB_DATA);
1514         FLUSH_SMMU_REGS(smmu);
1515
1516         spin_unlock_irqrestore(&smmu->lock, flags);
1517
1518         domain->priv = as;
1519
1520         domain->geometry.aperture_start = smmu->iovmm_base;
1521         domain->geometry.aperture_end   = smmu->iovmm_base +
1522                 smmu->page_count * SMMU_PAGE_SIZE - 1;
1523         domain->geometry.force_aperture = true;
1524
1525         dev_dbg(smmu->dev, "smmu_as@%p\n", as);
1526
1527         return 0;
1528 }
1529
1530 static void smmu_iommu_domain_destroy(struct iommu_domain *domain)
1531 {
1532         struct smmu_as *as = domain->priv;
1533         struct smmu_device *smmu = as->smmu;
1534         unsigned long flags;
1535
1536         spin_lock_irqsave(&as->lock, flags);
1537
1538         if (as->pdir_page) {
1539                 spin_lock(&smmu->lock);
1540                 smmu_write(smmu, SMMU_PTB_ASID_CUR(as->asid), SMMU_PTB_ASID);
1541                 smmu_write(smmu, SMMU_PTB_DATA_RESET_VAL, SMMU_PTB_DATA);
1542                 FLUSH_SMMU_REGS(smmu);
1543                 spin_unlock(&smmu->lock);
1544
1545                 free_pdir(as);
1546         }
1547
1548         if (!list_empty(&as->client)) {
1549                 struct smmu_client *c, *tmp;
1550
1551                 list_for_each_entry_safe(c, tmp, &as->client, list)
1552                         smmu_iommu_detach_dev(domain, c->dev);
1553         }
1554
1555         spin_unlock_irqrestore(&as->lock, flags);
1556
1557         domain->priv = NULL;
1558         dev_dbg(smmu->dev, "smmu_as@%p\n", as);
1559 }
1560
1561 static struct iommu_ops smmu_iommu_ops = {
1562         .domain_init    = smmu_iommu_domain_init,
1563         .domain_destroy = smmu_iommu_domain_destroy,
1564         .attach_dev     = smmu_iommu_attach_dev,
1565         .detach_dev     = smmu_iommu_detach_dev,
1566         .map            = smmu_iommu_map,
1567         .map_pages      = smmu_iommu_map_pages,
1568         .map_sg         = smmu_iommu_map_sg,
1569         .unmap          = smmu_iommu_unmap,
1570         .iova_to_phys   = smmu_iommu_iova_to_phys,
1571         .domain_has_cap = smmu_iommu_domain_has_cap,
1572         .pgsize_bitmap  = SMMU_IOMMU_PGSIZES,
1573 };
1574
1575 /* Should be in the order of enum */
1576 static const char * const smmu_debugfs_mc[] = { "mc", };
1577 static const char * const smmu_debugfs_cache[] = {  "tlb", "ptc", };
1578
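/*
 * debugfs "tlb"/"ptc" write handler: accepts "off", "on" or "reset" and
 * updates the stats enable/test bits of the selected CACHE_CONFIG register.
 */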
1579 static ssize_t smmu_debugfs_stats_write(struct file *file,
1580                                         const char __user *buffer,
1581                                         size_t count, loff_t *pos)
1582 {
1583         struct smmu_debugfs_info *info;
1584         struct smmu_device *smmu;
1585         int i;
1586         enum {
1587                 _OFF = 0,
1588                 _ON,
1589                 _RESET,
1590         };
1591         const char * const command[] = {
1592                 [_OFF]          = "off",
1593                 [_ON]           = "on",
1594                 [_RESET]        = "reset",
1595         };
1596         char str[] = "reset";
1597         u32 val;
1598         size_t offs;
1599
1600         count = min_t(size_t, count, sizeof(str));
1601         if (copy_from_user(str, buffer, count))
1602                 return -EFAULT;
1603
1604         for (i = 0; i < ARRAY_SIZE(command); i++)
1605                 if (strncmp(str, command[i],
1606                             strlen(command[i])) == 0)
1607                         break;
1608
1609         if (i == ARRAY_SIZE(command))
1610                 return -EINVAL;
1611
1612         info = file_inode(file)->i_private;
1613         smmu = info->smmu;
1614
1615         offs = SMMU_CACHE_CONFIG(info->cache);
1616         val = smmu_read(smmu, offs);
1617         switch (i) {
1618         case _OFF:
1619                 val &= ~SMMU_CACHE_CONFIG_STATS_ENABLE;
1620                 val &= ~SMMU_CACHE_CONFIG_STATS_TEST;
1621                 smmu_write(smmu, val, offs);
1622                 break;
1623         case _ON:
1624                 val |= SMMU_CACHE_CONFIG_STATS_ENABLE;
1625                 val &= ~SMMU_CACHE_CONFIG_STATS_TEST;
1626                 smmu_write(smmu, val, offs);
1627                 break;
1628         case _RESET:
1629                 val |= SMMU_CACHE_CONFIG_STATS_TEST;
1630                 smmu_write(smmu, val, offs);
1631                 val &= ~SMMU_CACHE_CONFIG_STATS_TEST;
1632                 smmu_write(smmu, val, offs);
1633                 break;
1634         default:
1635                 BUG();
1636                 break;
1637         }
1638
1639         dev_dbg(smmu->dev, "%s() %08x, %08x @%08x\n", __func__,
1640                 val, smmu_read(smmu, offs), offs);
1641
1642         return count;
1643 }
1644
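/*
 * debugfs "tlb"/"ptc" read handler: prints the hit and miss counters of
 * the cache selected by this node.
 */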
1645 static int smmu_debugfs_stats_show(struct seq_file *s, void *v)
1646 {
1647         struct smmu_debugfs_info *info = s->private;
1648         struct smmu_device *smmu = info->smmu;
1649         int i;
1650         const char * const stats[] = { "hit", "miss", };
1651
1652
1653         for (i = 0; i < ARRAY_SIZE(stats); i++) {
1654                 u32 val;
1655                 size_t offs;
1656
1657                 offs = SMMU_STATS_CACHE_COUNT(info->mc, info->cache, i);
1658                 val = smmu_read(smmu, offs);
1659                 seq_printf(s, "%s:%08x ", stats[i], val);
1660
1661                 dev_dbg(smmu->dev, "%s() %s %08x @%08x\n", __func__,
1662                         stats[i], val, offs);
1663         }
1664         seq_putc(s, '\n');
1665         return 0;
1666 }
1667
1668 static int smmu_debugfs_stats_open(struct inode *inode, struct file *file)
1669 {
1670         return single_open(file, smmu_debugfs_stats_show, inode->i_private);
1671 }
1672
1673 static const struct file_operations smmu_debugfs_stats_fops = {
1674         .open           = smmu_debugfs_stats_open,
1675         .read           = seq_read,
1676         .llseek         = seq_lseek,
1677         .release        = single_release,
1678         .write          = smmu_debugfs_stats_write,
1679 };
1680
1681 static void smmu_debugfs_delete(struct smmu_device *smmu)
1682 {
1683         debugfs_remove_recursive(smmu->debugfs_root);
1684         kfree(smmu->debugfs_info);
1685 }
1686
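/*
 * Build the <dev_name>/mc/{tlb,ptc} stats files and the
 * flush_all_threshold_pages knob under debugfs; on any failure the
 * partially created tree is torn down again.
 */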
1687 static void smmu_debugfs_create(struct smmu_device *smmu)
1688 {
1689         int i;
1690         size_t bytes;
1691         struct dentry *root;
1692
1693         bytes = ARRAY_SIZE(smmu_debugfs_mc) * ARRAY_SIZE(smmu_debugfs_cache) *
1694                 sizeof(*smmu->debugfs_info);
1695         smmu->debugfs_info = kmalloc(bytes, GFP_KERNEL);
1696         if (!smmu->debugfs_info)
1697                 return;
1698
1699         root = debugfs_create_dir(dev_name(smmu->dev), NULL);
1700         if (!root)
1701                 goto err_out;
1702         smmu->debugfs_root = root;
1703
1704         for (i = 0; i < ARRAY_SIZE(smmu_debugfs_mc); i++) {
1705                 int j;
1706                 struct dentry *mc;
1707
1708                 mc = debugfs_create_dir(smmu_debugfs_mc[i], root);
1709                 if (!mc)
1710                         goto err_out;
1711
1712                 for (j = 0; j < ARRAY_SIZE(smmu_debugfs_cache); j++) {
1713                         struct dentry *cache;
1714                         struct smmu_debugfs_info *info;
1715
1716                         info = smmu->debugfs_info;
1717                         info += i * ARRAY_SIZE(smmu_debugfs_cache) + j;
1718                         info->smmu = smmu;
1719                         info->mc = i;
1720                         info->cache = j;
1721
1722                         cache = debugfs_create_file(smmu_debugfs_cache[j],
1723                                                     S_IWUSR | S_IRUSR, mc,
1724                                                     (void *)info,
1725                                                     &smmu_debugfs_stats_fops);
1726                         if (!cache)
1727                                 goto err_out;
1728                 }
1729         }
1730
1731         debugfs_create_size_t("flush_all_threshold_pages", S_IWUSR | S_IRUSR,
1732                               root, &smmu_flush_all_th_pages);
1733         return;
1734
1735 err_out:
1736         smmu_debugfs_delete(smmu);
1737 }
1738
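/*
 * tegra_smmu_suspend() - snapshot the TRANSLATION_ENABLE_* and ASID
 * security registers so the resume/restore path can reprogram the SMMU
 * through smmu_setup_regs().
 */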
1739 int tegra_smmu_suspend(struct device *dev)
1740 {
1741         int i;
1742         struct smmu_device *smmu = dev_get_drvdata(dev);
1743
1744         for (i = 0; i < smmu->num_translation_enable; i++)
1745                 smmu->translation_enable[i] = smmu_read(smmu,
1746                                 SMMU_TRANSLATION_ENABLE_0 + i * sizeof(u32));
1747
1748         for (i = 0; i < smmu->num_asid_security; i++)
1749                 smmu->asid_security[i] =
1750                         smmu_read(smmu, smmu_asid_security_ofs[i]);
1751
1752         return 0;
1753 }
1754 EXPORT_SYMBOL(tegra_smmu_suspend);
1755
1756 int tegra_smmu_save(void)
1757 {
1758         return tegra_smmu_suspend(save_smmu_device);
1759 }
1760
1761 struct device *get_smmu_device(void)
1762 {
1763         return save_smmu_device;
1764 }
1765 EXPORT_SYMBOL(get_smmu_device);
1766
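/* tegra_smmu_resume() - reprogram the SMMU via smmu_setup_regs(). */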
1767 int tegra_smmu_resume(struct device *dev)
1768 {
1769         struct smmu_device *smmu = dev_get_drvdata(dev);
1770         unsigned long flags;
1771
1772         spin_lock_irqsave(&smmu->lock, flags);
1773         smmu_setup_regs(smmu);
1774         spin_unlock_irqrestore(&smmu->lock, flags);
1775         return 0;
1776 }
1777 EXPORT_SYMBOL(tegra_smmu_resume);
1778
1779 int tegra_smmu_restore(void)
1780 {
1781         return tegra_smmu_resume(save_smmu_device);
1782 }
1783
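/*
 * tegra_smmu_probe() - single-instance probe.  Maps the SMMU and AHB
 * arbitration register apertures, sizes the per-chip parameters (swgids,
 * translation-enable/ASID-security counts, PTC cache size) from
 * tegra_get_chipid(), initializes every AS, and finally registers
 * smmu_iommu_ops for the platform bus.
 */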
1784 static int tegra_smmu_probe(struct platform_device *pdev)
1785 {
1786         struct smmu_device *smmu;
1787         struct resource *regs, *regs2, *window;
1788         struct device *dev = &pdev->dev;
1789         int i, num_as;
1790         size_t bytes;
1791
1792         if (smmu_handle)
1793                 return -EIO;
1794
1795         BUILD_BUG_ON(PAGE_SHIFT != SMMU_PAGE_SHIFT);
1796
1797         save_smmu_device = dev;
1798
1799         regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1800         regs2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1801         window = tegra_smmu_window(0);
1802         if (!regs || !regs2 || !window) {
1803                 dev_err(dev, "No SMMU resources\n");
1804                 return -ENODEV;
1805         }
1806
1807         num_as = SMMU_NUM_ASIDS;
1808         if (tegra_get_chipid() == TEGRA_CHIPID_TEGRA12)
1809                 num_as = SMMU_NUM_ASIDS_TEGRA12;
1810
1811         bytes = sizeof(*smmu) + num_as * sizeof(*smmu->as);
1812         smmu = devm_kzalloc(dev, bytes, GFP_KERNEL);
1813         if (!smmu) {
1814                 dev_err(dev, "failed to allocate smmu_device\n");
1815                 return -ENOMEM;
1816         }
1817
1818         smmu->dev = dev;
1819         smmu->num_as = num_as;
1820
1821         smmu->iovmm_base = (unsigned long)window->start;
1822         smmu->page_count = resource_size(window) >> SMMU_PAGE_SHIFT;
1823         smmu->regs = devm_ioremap(dev, regs->start, resource_size(regs));
1824         smmu->regs_ahbarb = devm_ioremap(dev, regs2->start,
1825                                          resource_size(regs2));
1826         if (!smmu->regs || !smmu->regs_ahbarb) {
1827                 dev_err(dev, "failed to remap SMMU registers\n");
1828                 return -ENXIO;
1829         }
1830
1831         if (IS_ENABLED(CONFIG_ARCH_TEGRA_3x_SOC) &&
1832             (tegra_get_chipid() == TEGRA_CHIPID_TEGRA3))
1833                 smmu->swgids = 0x00000000000779ff;
1834         if (IS_ENABLED(CONFIG_ARCH_TEGRA_11x_SOC) &&
1835             (tegra_get_chipid() == TEGRA_CHIPID_TEGRA11))
1836                 smmu->swgids = 0x0000000001b659fe;
1837         if (IS_ENABLED(CONFIG_ARCH_TEGRA_14x_SOC) &&
1838             (tegra_get_chipid() == TEGRA_CHIPID_TEGRA14))
1839                 smmu->swgids = 0x0000000001865bfe;
1840         if (IS_ENABLED(CONFIG_ARCH_TEGRA_12x_SOC) &&
1841             (tegra_get_chipid() == TEGRA_CHIPID_TEGRA12)) {
1842                 smmu->swgids = 0x00000001fffecdcf;
1843                 smmu->num_translation_enable = 4;
1844                 smmu->num_asid_security = 8;
1845                 smmu->ptc_cache_size = SZ_32K;
1846         } else {
1847                 smmu->num_translation_enable = 3;
1848                 smmu->num_asid_security = 1;
1849                 smmu->ptc_cache_size = SZ_16K;
1850         }
1851
1852         for (i = 0; i < smmu->num_translation_enable; i++)
1853                 smmu->translation_enable[i] = ~0;
1854
1855         for (i = 0; i < smmu->num_as; i++) {
1856                 struct smmu_as *as = &smmu->as[i];
1857
1858                 as->smmu = smmu;
1859                 as->asid = i;
1860                 as->pdir_attr = _PDIR_ATTR;
1861                 as->pde_attr = _PDE_ATTR;
1862                 as->pte_attr = _PTE_ATTR;
1863
1864                 spin_lock_init(&as->lock);
1865                 spin_lock_init(&as->client_lock);
1866                 INIT_LIST_HEAD(&as->client);
1867         }
1868         spin_lock_init(&smmu->lock);
1869         smmu_setup_regs(smmu);
1870         platform_set_drvdata(pdev, smmu);
1871
1872         smmu->avp_vector_page = alloc_page(GFP_KERNEL);
1873         if (!smmu->avp_vector_page)
1874                 return -ENOMEM;
1875
1876         smmu_debugfs_create(smmu);
1877         smmu_handle = smmu;
1878         bus_set_iommu(&platform_bus_type, &smmu_iommu_ops);
1879
1880         dev_info(dev, "Loaded Tegra IOMMU driver\n");
1881         return 0;
1882 }
1883
1884 static int tegra_smmu_remove(struct platform_device *pdev)
1885 {
1886         struct smmu_device *smmu = platform_get_drvdata(pdev);
1887         int i;
1888
1889         smmu_debugfs_delete(smmu);
1890
1891         smmu_write(smmu, SMMU_CONFIG_DISABLE, SMMU_CONFIG);
1892         for (i = 0; i < smmu->num_as; i++)
1893                 free_pdir(&smmu->as[i]);
1894         __free_page(smmu->avp_vector_page);
1895         smmu_handle = NULL;
1896         return 0;
1897 }
1898
1899 const struct dev_pm_ops tegra_smmu_pm_ops = {
1900         .suspend        = tegra_smmu_suspend,
1901         .resume         = tegra_smmu_resume,
1902 };
1903
1904 static struct platform_driver tegra_smmu_driver = {
1905         .probe          = tegra_smmu_probe,
1906         .remove         = tegra_smmu_remove,
1907         .driver = {
1908                 .owner  = THIS_MODULE,
1909                 .name   = "tegra_smmu",
1910                 .pm     = &tegra_smmu_pm_ops,
1911         },
1912 };
1913
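/*
 * Platform bus notifier: when a device appears or binds to a driver,
 * attach it to the DMA IOMMU mapping chosen by its swgids; when it is
 * removed or unbound, detach it again.
 */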
1914 static int tegra_smmu_device_notifier(struct notifier_block *nb,
1915                                       unsigned long event, void *_dev)
1916 {
1917         struct dma_iommu_mapping *map;
1918         struct device *dev = _dev;
1919
1920         map = tegra_smmu_get_map(dev, tegra_smmu_of_get_swgids(dev));
1921         if (!map)
1922                 return NOTIFY_DONE;
1923
1924         switch (event) {
1925         case BUS_NOTIFY_BIND_DRIVER:
1926                 if (get_dma_ops(dev) != &arm_dma_ops)
1927                         break;
1928                 /* FALLTHROUGH */
1929         case BUS_NOTIFY_ADD_DEVICE:
1930                 if (strncmp(dev_name(dev), "tegra_smmu", 10) == 0)
1931                         break;
1932
1933                 if (!smmu_handle) {
1934                         dev_warn(dev, "No map yet available\n");
1935                         break;
1936                 }
1937
1938                 WARN_ON(to_dma_iommu_mapping(dev) == map);
1939
1940                 if (arm_iommu_attach_device(dev, map)) {
1941                         dev_err(dev, "Failed to attach %s\n", dev_name(dev));
1942                         arm_iommu_release_mapping(map);
1943                         break;
1944                 }
1945                 dev_dbg(dev, "Attached %s to map %p\n", dev_name(dev), map);
1946                 break;
1947         case BUS_NOTIFY_DEL_DEVICE:
1948                 if (dev->driver)
1949                         break;
1950                 /* FALLTHROUGH */
1951         case BUS_NOTIFY_UNBOUND_DRIVER:
1952                 WARN_ON(!to_dma_iommu_mapping(dev));
1953
1954                 dev_dbg(dev, "Detaching %s from map %p\n", dev_name(dev),
1955                         to_dma_iommu_mapping(dev));
1956                 arm_iommu_detach_device(dev);
1957                 break;
1958         default:
1959                 break;
1960         }
1961         return NOTIFY_DONE;
1962 }
1963
1964 static struct notifier_block tegra_smmu_device_nb = {
1965         .notifier_call = tegra_smmu_device_notifier,
1966 };
1967
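/*
 * Registered as a core_initcall: register the platform driver and, when
 * CONFIG_ARM_DMA_USE_IOMMU is enabled, hook the platform bus notifier so
 * devices are attached to their IOMMU mappings as they show up.
 */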
1968 static int tegra_smmu_init(void)
1969 {
1970         int err;
1971
1972         err = platform_driver_register(&tegra_smmu_driver);
1973         if (err)
1974                 return err;
1975         if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
1976                 bus_register_notifier(&platform_bus_type,
1977                                       &tegra_smmu_device_nb);
1978         return 0;
1979 }
1980
1981 static int tegra_smmu_remove_map(struct device *dev, void *data)
1982 {
1983         struct dma_iommu_mapping *map = to_dma_iommu_mapping(dev);
1984         if (map)
1985                 arm_iommu_release_mapping(map);
1986         return 0;
1987 }
1988
1989 static void __exit tegra_smmu_exit(void)
1990 {
1991         if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
1992                 bus_for_each_dev(&platform_bus_type, NULL, NULL,
1993                                  tegra_smmu_remove_map);
1994                 bus_unregister_notifier(&platform_bus_type,
1995                                         &tegra_smmu_device_nb);
1996         }
1997         platform_driver_unregister(&tegra_smmu_driver);
1998 }
1999
2000 core_initcall(tegra_smmu_init);
2001 module_exit(tegra_smmu_exit);
2002
2003 MODULE_DESCRIPTION("IOMMU API for SMMU in Tegra SoC");
2004 MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>");
2005 MODULE_LICENSE("GPL v2");