/*
 * ARC Cache Management
 *
 * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/cache.h>
#include <linux/mmu_context.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <asm/cachectl.h>
#include <asm/setup.h>

static int l2_line_sz;
static int ioc_exists;
int slc_enable = 1, ioc_enable = 0;
unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
unsigned long perip_end = 0xFFFFFFFF; /* legacy value */

void (*_cache_line_loop_ic_fn)(phys_addr_t paddr, unsigned long vaddr,
			       unsigned long sz, const int cacheop);

void (*__dma_cache_wback_inv)(phys_addr_t start, unsigned long sz);
void (*__dma_cache_inv)(phys_addr_t start, unsigned long sz);
void (*__dma_cache_wback)(phys_addr_t start, unsigned long sz);

char *arc_cache_mumbojumbo(int c, char *buf, int len)
{
	int n = 0;
	struct cpuinfo_arc_cache *p;

#define PR_CACHE(p, cfg, str)						\
	if (!(p)->line_len)						\
		n += scnprintf(buf + n, len - n, str"\t\t: N/A\n");	\
	else								\
		n += scnprintf(buf + n, len - n,			\
			str"\t\t: %uK, %dway/set, %uB Line, %s%s%s\n",	\
			(p)->sz_k, (p)->assoc, (p)->line_len,		\
			(p)->vipt ? "VIPT" : "PIPT",			\
			(p)->alias ? " aliasing" : "",			\
			IS_USED_CFG(cfg));

	PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
	PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");

	p = &cpuinfo_arc700[c].slc;
	if (p->line_len)
		n += scnprintf(buf + n, len - n,
			       "SLC\t\t: %uK, %uB Line%s\n",
			       p->sz_k, p->line_len, IS_USED_RUN(slc_enable));

	n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n",
		       perip_base,
		       IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency "));

	return buf;
}
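
/*
 * Illustrative output (hypothetical geometry), assembled from the format
 * strings above; exact trailing qualifiers depend on IS_USED_CFG()/
 * IS_USED_RUN():
 *
 *	I-Cache		: 32K, 4way/set, 64B Line, VIPT
 *	D-Cache		: 64K, 2way/set, 64B Line, PIPT
 *	SLC		: 512K, 128B Line
 *	Peripherals	: 0xc0000000
 */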

/*
 * Read the Cache Build Configuration Registers, Decode them and save into
 * the cpuinfo structure for later use.
 * No Validation done here, simply read/convert the BCRs
 */
static void read_decode_cache_bcr_arcv2(int cpu)
{
	struct cpuinfo_arc_cache *p_slc = &cpuinfo_arc700[cpu].slc;
	struct bcr_generic sbcr;

	struct bcr_slc_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:24, way:2, lsz:2, sz:4;
#else
		unsigned int sz:4, lsz:2, way:2, pad:24;
#endif
	} slc_cfg;

	struct bcr_clust_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:7, c:1, num_entries:8, num_cores:8, ver:8;
#else
		unsigned int ver:8, num_cores:8, num_entries:8, c:1, pad:7;
#endif
	} cbcr;

	struct bcr_volatile {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int start:4, limit:4, pad:22, order:1, disable:1;
#else
		unsigned int disable:1, order:1, pad:22, limit:4, start:4;
#endif
	} vol;


	READ_BCR(ARC_REG_SLC_BCR, sbcr);
	if (sbcr.ver) {
		READ_BCR(ARC_REG_SLC_CFG, slc_cfg);
		p_slc->sz_k = 128 << slc_cfg.sz;
		l2_line_sz = p_slc->line_len = (slc_cfg.lsz == 0) ? 128 : 64;
	}
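	/*
	 * Worked example (illustrative values): slc_cfg.sz == 2 decodes to
	 * 128 << 2 = 512K of SLC, and slc_cfg.lsz == 1 decodes to 64B lines.
	 */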

	READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);
	if (cbcr.c)
		ioc_exists = 1;
	else
		ioc_enable = 0;

	/* HS 2.0 didn't have AUX_VOL */
	if (cpuinfo_arc700[cpu].core.family > 0x51) {
		READ_BCR(AUX_VOL, vol);
		perip_base = vol.start << 28;
		/* HS 3.0 has limit and strict-ordering fields */
		if (cpuinfo_arc700[cpu].core.family > 0x52)
			perip_end = (vol.limit << 28) - 1;
	}
}

void read_decode_cache_bcr(void)
{
	struct cpuinfo_arc_cache *p_ic, *p_dc;
	unsigned int cpu = smp_processor_id();
	struct bcr_cache {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
#else
		unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
#endif
	} ibcr, dbcr;

	p_ic = &cpuinfo_arc700[cpu].icache;
	READ_BCR(ARC_REG_IC_BCR, ibcr);

	if (!ibcr.ver)
		goto dc_chk;

	if (ibcr.ver <= 3) {
		BUG_ON(ibcr.config != 3);
		p_ic->assoc = 2;		/* Fixed to 2w set assoc */
	} else if (ibcr.ver >= 4) {
		p_ic->assoc = 1 << ibcr.config;	/* 1,2,4,8 */
	}

	p_ic->line_len = 8 << ibcr.line_len;
	p_ic->sz_k = 1 << (ibcr.sz - 1);
	p_ic->vipt = 1;
	p_ic->alias = p_ic->sz_k/p_ic->assoc/TO_KB(PAGE_SIZE) > 1;
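	/*
	 * Worked example (illustrative): a 32K, 2-way VIPT I-cache with 8K
	 * pages has a 16K way-size spanning 2 pages, i.e. 2 possible aliases:
	 * sz_k/assoc/TO_KB(PAGE_SIZE) = 32/2/8 = 2 > 1, so alias = 1.
	 */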

dc_chk:
	p_dc = &cpuinfo_arc700[cpu].dcache;
	READ_BCR(ARC_REG_DC_BCR, dbcr);

	if (!dbcr.ver)
		goto slc_chk;

	if (dbcr.ver <= 3) {
		BUG_ON(dbcr.config != 2);
		p_dc->assoc = 4;		/* Fixed to 4w set assoc */
		p_dc->vipt = 1;
		p_dc->alias = p_dc->sz_k/p_dc->assoc/TO_KB(PAGE_SIZE) > 1;
	} else if (dbcr.ver >= 4) {
		p_dc->assoc = 1 << dbcr.config;	/* 1,2,4,8 */
		p_dc->vipt = 0;
		p_dc->alias = 0;		/* PIPT so can't VIPT alias */
	}

	p_dc->line_len = 16 << dbcr.line_len;
	p_dc->sz_k = 1 << (dbcr.sz - 1);
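	/*
	 * e.g. (illustrative values): dbcr.sz == 7 decodes to 1 << 6 = 64K,
	 * and dbcr.line_len == 2 to a 16 << 2 = 64B D-cache line.
	 */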

slc_chk:
	if (is_isa_arcv2())
		read_decode_cache_bcr_arcv2(cpu);
}

/*
 * Line Operation on {I,D}-Cache
 */

#define OP_INV		0x1
#define OP_FLUSH	0x2
#define OP_FLUSH_N_INV	0x3
#define OP_INV_IC	0x4

/*
 * I-Cache Aliasing in ARC700 VIPT caches (MMU v1-v3)
 *
 * ARC VIPT I-cache uses vaddr to index into cache and paddr to match the tag.
 * The orig Cache Management Module "CDU" only required paddr to invalidate a
 * certain line since it sufficed as index in Non-Aliasing VIPT cache-geometry.
 * In fact for distinct V1, V2, P: all of {V1-P}, {V2-P}, {P-P} would end up
 * fetching the exact same line.
 *
 * However for larger Caches (way-size > page-size) - i.e. in Aliasing config,
 * paddr alone could not be used to correctly index the cache.
 *
 * ------------------
 * MMU v1/v2 (Fixed Page Size 8k)
 * ------------------
 * The solution was to provide CDU with these additional vaddr bits. These
 * would be bits [x:13], x would depend on cache-geometry, 13 comes from
 * standard page size of 8k.
 * H/w folks chose [17:13] to be a future safe range, and more so these 5 bits
 * of vaddr could easily be "stuffed" in the paddr as bits [4:0] since the
 * orig 5 bits of paddr were anyways ignored by CDU line ops, as they
 * represent the offset within cache-line. The adv of using this "clumsy"
 * interface for additional info was no new reg was needed in CDU programming
 * model.
 *
 * 17:13 represented the max num of bits passable, actual bits needed were
 * fewer, based on the num-of-aliases possible.
 * -for 2 alias possibility, only bit 13 needed (32K cache)
 * -for 4 alias possibility, bits 14:13 needed (64K cache)
 *
 * ------------------
 * MMU v3
 * ------------------
 * This ver of MMU supports variable page sizes (1k-16k): although Linux will
 * only support 8k (default), 16k and 4k.
 * However from hardware perspective, smaller page sizes aggravate aliasing,
 * meaning more vaddr bits are needed to disambiguate the cache-line-op;
 * the existing scheme of piggybacking won't work for certain configurations.
 * Two new registers IC_PTAG and DC_PTAG were introduced.
 * "tag" bits are provided in PTAG, index bits in existing IVIL/IVDL/FLDL regs
 */

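/*
 * Worked example of the bit-stuffing above (illustrative addresses), assuming
 * 8K pages: for a line op on vaddr 0x70006000 / paddr 0x80306000, the needed
 * vaddr bits are (0x70006000 >> 13) & 0x1F = 0x3, so the CDU is handed
 * 0x80306000 | 0x3 = 0x80306003 - the low 5 paddr bits carry the index info.
 */
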
static inline
void __cache_line_loop_v2(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int op)
{
	unsigned int aux_cmd;
	int num_lines;
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);
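	/*
	 * e.g. (illustrative, with 64B lines): paddr 0x80000004, sz 0x80
	 * becomes sz 0x84, paddr 0x80000000, num_lines = ceil(0x84/64) = 3.
	 */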

	/* MMUv2 and before: paddr contains stuffed vaddrs bits */
	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;

	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
	}
}

/*
 * For ARC700 MMUv3 I-cache and D-cache flushes
 * - ARC700 programming model requires paddr and vaddr be passed in separate
 *   AUX registers (*_IV*L and *_PTAG respectively) irrespective of whether the
 *   caches actually alias or not.
 * - For HS38, only the aliasing I-cache configuration uses the PTAG reg
 *   (non aliasing I-cache version doesn't; while D-cache can't possibly alias)
 */
static inline
void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int op)
{
	unsigned int aux_cmd, aux_tag;
	int num_lines;
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
		aux_tag = ARC_REG_IC_PTAG;
	} else {
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
		aux_tag = ARC_REG_DC_PTAG;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}
	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/*
	 * MMUv3, cache ops require paddr in PTAG reg
	 * if V-P const for loop, PTAG can be written once outside loop
	 */
	if (full_page)
		write_aux_reg(aux_tag, paddr);

	/*
	 * This is technically for MMU v4, using the MMU v3 programming model
	 * Special work for HS38 aliasing I-cache configuration with PAE40
	 *   - upper 8 bits of paddr need to be written into PTAG_HI
	 *   - (and needs to be written before the lower 32 bits)
	 * Note that PTAG_HI is hoisted outside the line loop
	 */
	if (is_pae40_enabled() && op == OP_INV_IC)
		write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
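	/* e.g. (illustrative): for 40-bit paddr 0x1_00200000, PTAG_HI gets 1 */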

	while (num_lines-- > 0) {
		if (!full_page) {
			write_aux_reg(aux_tag, paddr);
			paddr += L1_CACHE_BYTES;
		}

		write_aux_reg(aux_cmd, vaddr);
		vaddr += L1_CACHE_BYTES;
	}
}
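
/*
 * Net effect of one loop iteration above under MMUv3, for a D-cache flush
 * (sketch, not verbatim driver code):
 *
 *	write_aux_reg(ARC_REG_DC_PTAG, paddr);	(tag bits from paddr)
 *	write_aux_reg(ARC_REG_DC_FLDL, vaddr);	(index bits from vaddr)
 */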

/*
 * In HS38x (MMU v4), I-cache is VIPT (can alias), D-cache is PIPT
 * Here's how cache ops are implemented
 *
 *  - D-cache: only paddr needed (in DC_IVDL/DC_FLDL)
 *  - I-cache Non Aliasing: Despite VIPT, only paddr needed (in IC_IVIL)
 *  - I-cache Aliasing: Both vaddr and paddr needed (in IC_IVIL, IC_PTAG
 *    respectively, similar to MMU v3 programming model, hence
 *    __cache_line_loop_v3() is used)
 *
 * If PAE40 is enabled, independent of aliasing considerations, the higher
 * bits need to be written into PTAG_HI
 */
static inline
void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int cacheop)
{
	unsigned int aux_cmd;
	int num_lines;
	const int full_page_op = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (cacheop == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = cacheop & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page_op) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/*
	 * For HS38 PAE40 configuration
	 *   - upper 8 bits of paddr need to be written into PTAG_HI
	 *   - (and needs to be written before the lower 32 bits)
	 */
	if (is_pae40_enabled()) {
		if (cacheop == OP_INV_IC)
			/*
			 * Non aliasing I-cache in HS38,
			 * aliasing I-cache handled in __cache_line_loop_v3()
			 */
			write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
		else
			write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32);
	}

	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
	}
}

#if (CONFIG_ARC_MMU_VER < 3)
#define __cache_line_loop	__cache_line_loop_v2
#elif (CONFIG_ARC_MMU_VER == 3)
#define __cache_line_loop	__cache_line_loop_v3
#elif (CONFIG_ARC_MMU_VER > 3)
#define __cache_line_loop	__cache_line_loop_v4
#endif

#ifdef CONFIG_ARC_HAS_DCACHE

/***************************************************************
 * Machine specific helpers for Entire D-Cache or Per Line ops
 */

static inline void __before_dc_op(const int op)
{
	if (op == OP_FLUSH_N_INV) {
		/* Dcache provides 2 cmd: FLUSH or INV
		 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
		 * flush-n-inv is achieved by INV cmd but with IM=1
		 * So toggle INV sub-mode depending on op request and default
		 */
		const unsigned int ctl = ARC_REG_DC_CTRL;
		write_aux_reg(ctl, read_aux_reg(ctl) | DC_CTRL_INV_MODE_FLUSH);
	}
}

static inline void __after_dc_op(const int op)
{
	if (op & OP_FLUSH) {
		const unsigned int ctl = ARC_REG_DC_CTRL;
		unsigned int reg;

		/* flush / flush-n-inv both wait */
		while ((reg = read_aux_reg(ctl)) & DC_CTRL_FLUSH_STATUS)
			;

		/* Switch back to default Invalidate mode */
		if (op == OP_FLUSH_N_INV)
			write_aux_reg(ctl, reg & ~DC_CTRL_INV_MODE_FLUSH);
	}
}

/*
 * Operation on Entire D-Cache
 * @op = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV}
 * Note that constant propagation ensures all the checks are gone
 * in generated code
 */
static inline void __dc_entire_op(const int op)
{
	int aux;

	__before_dc_op(op);

	if (op & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		aux = ARC_REG_DC_IVDC;
	else
		aux = ARC_REG_DC_FLSH;

	write_aux_reg(aux, 0x1);

	__after_dc_op(op);
}

/* For kernel mappings cache operation: index is same as paddr */
#define __dc_line_op_k(p, sz, op)	__dc_line_op(p, p, sz, op)

/*
 * D-Cache Line ops: Per Line INV (discard or wback+discard) or FLUSH (wback)
 */
static inline void __dc_line_op(phys_addr_t paddr, unsigned long vaddr,
				unsigned long sz, const int op)
{
	unsigned long flags;

	local_irq_save(flags);

	__before_dc_op(op);

	__cache_line_loop(paddr, vaddr, sz, op);

	__after_dc_op(op);

	local_irq_restore(flags);
}

#else

#define __dc_entire_op(op)
#define __dc_line_op(paddr, vaddr, sz, op)
#define __dc_line_op_k(paddr, sz, op)

#endif /* CONFIG_ARC_HAS_DCACHE */

#ifdef CONFIG_ARC_HAS_ICACHE

static inline void __ic_entire_inv(void)
{
	write_aux_reg(ARC_REG_IC_IVIC, 1);
	read_aux_reg(ARC_REG_IC_CTRL);	/* blocks */
}

static inline void
__ic_line_inv_vaddr_local(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz)
{
	unsigned long flags;

	local_irq_save(flags);
	(*_cache_line_loop_ic_fn)(paddr, vaddr, sz, OP_INV_IC);
	local_irq_restore(flags);
}

#ifndef CONFIG_SMP

#define __ic_line_inv_vaddr(p, v, s)	__ic_line_inv_vaddr_local(p, v, s)

#else

struct ic_inv_args {
	phys_addr_t paddr, vaddr;
	int sz;
};

static void __ic_line_inv_vaddr_helper(void *info)
{
	struct ic_inv_args *ic_inv = info;

	__ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
}

static void __ic_line_inv_vaddr(phys_addr_t paddr, unsigned long vaddr,
				unsigned long sz)
{
	struct ic_inv_args ic_inv = {
		.paddr = paddr,
		.vaddr = vaddr,
		.sz    = sz
	};

	on_each_cpu(__ic_line_inv_vaddr_helper, &ic_inv, 1);
}

#endif	/* CONFIG_SMP */

#else	/* !CONFIG_ARC_HAS_ICACHE */

#define __ic_entire_inv()
#define __ic_line_inv_vaddr(pstart, vstart, sz)

#endif	/* CONFIG_ARC_HAS_ICACHE */

noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2
	/*
	 * SLC is shared between all cores and concurrent aux operations from
	 * multiple cores need to be serialized using a spinlock
	 * A concurrent operation can be silently ignored and/or the old/new
	 * operation can remain incomplete forever (lockup in SLC_CTRL_BUSY loop
	 * below)
	 */
	static DEFINE_SPINLOCK(lock);
	unsigned long flags;
	unsigned int ctrl;

	spin_lock_irqsave(&lock, flags);

	/*
	 * The Region Flush operation is specified by CTRL.RGN_OP[11..9]
	 *  - b'000 (default) is Flush,
	 *  - b'001 is Invalidate if CTRL.IM == 0
	 *  - b'001 is Flush-n-Invalidate if CTRL.IM == 1
	 */
	ctrl = read_aux_reg(ARC_REG_SLC_CTRL);

	/* Don't rely on default value of IM bit */
	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	if (op & OP_INV)
		ctrl |= SLC_CTRL_RGN_OP_INV;	/* Inv or flush-n-inv */
	else
		ctrl &= ~SLC_CTRL_RGN_OP_INV;

	write_aux_reg(ARC_REG_SLC_CTRL, ctrl);

	/*
	 * Lower bits are ignored, no need to clip
	 * END needs to be setup before START (latter triggers the operation)
	 * END can't be same as START, so add (l2_line_sz - 1) to sz
	 */
	write_aux_reg(ARC_REG_SLC_RGN_END, (paddr + sz + l2_line_sz - 1));
	write_aux_reg(ARC_REG_SLC_RGN_START, paddr);
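	/*
	 * e.g. (illustrative, 64B SLC lines): paddr 0x80000040, sz 0x20 yields
	 * END = 0x80000040 + 0x20 + 63 = 0x8000009f, START = 0x80000040, so
	 * END lands in the next line and is guaranteed to differ from START.
	 */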

	while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);

	spin_unlock_irqrestore(&lock, flags);
#endif
}

noinline static void slc_entire_op(const int op)
{
	unsigned int ctrl, r = ARC_REG_SLC_CTRL;

	ctrl = read_aux_reg(r);

	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	write_aux_reg(r, ctrl);

	write_aux_reg(ARC_REG_SLC_INVALIDATE, 1);

	/* Important to wait for flush to complete */
	while (read_aux_reg(r) & SLC_CTRL_BUSY);
}

static inline void arc_slc_disable(void)
{
	const int r = ARC_REG_SLC_CTRL;

	slc_entire_op(OP_FLUSH_N_INV);
	write_aux_reg(r, read_aux_reg(r) | SLC_CTRL_DIS);
}

static inline void arc_slc_enable(void)
{
	const int r = ARC_REG_SLC_CTRL;

	write_aux_reg(r, read_aux_reg(r) & ~SLC_CTRL_DIS);
}

/***********************************************************
 * Exported APIs
 */

/*
 * Handle cache congruency of kernel and userspace mappings of page when kernel
 * writes-to/reads-from
 *
 * The idea is to defer flushing of kernel mapping after a WRITE, possible if:
 *  -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
 *  -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
 *  -In SMP, if hardware caches are coherent
 *
 * There's a corollary case, where kernel READs from a userspace mapped page.
 * If the U-mapping is not congruent to the K-mapping, former needs flushing.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	if (!cache_is_vipt_aliasing()) {
		clear_bit(PG_dc_clean, &page->flags);
		return;
	}

	/* don't handle anon pages here */
	mapping = page_mapping(page);
	if (!mapping)
		return;

	/*
	 * pagecache page, file not yet mapped to userspace
	 * Make a note that K-mapping is dirty
	 */
	if (!mapping_mapped(mapping)) {
		clear_bit(PG_dc_clean, &page->flags);
	} else if (page_mapcount(page)) {

		/* kernel reading from page with U-mapping */
		phys_addr_t paddr = (unsigned long)page_address(page);
		unsigned long vaddr = page->index << PAGE_SHIFT;

		if (addr_not_cache_congruent(paddr, vaddr))
			__flush_dcache_page(paddr, vaddr);
	}
}
EXPORT_SYMBOL(flush_dcache_page);

/*
 * DMA ops for systems with L1 cache only
 * Make memory coherent with L1 cache by flushing/invalidating L1 lines
 */
static void __dma_cache_wback_inv_l1(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
}

static void __dma_cache_inv_l1(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
}

static void __dma_cache_wback_l1(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
}

/*
 * DMA ops for systems with both L1 and L2 caches, but without IOC
 * Both L1 and L2 lines need to be explicitly flushed/invalidated
 */
static void __dma_cache_wback_inv_slc(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
	slc_op(start, sz, OP_FLUSH_N_INV);
}

static void __dma_cache_inv_slc(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
	slc_op(start, sz, OP_INV);
}

static void __dma_cache_wback_slc(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
	slc_op(start, sz, OP_FLUSH);
}

/*
 * DMA ops for systems with IOC
 * IOC hardware snoops all DMA traffic keeping the caches consistent with
 * memory - eliding need for any explicit cache maintenance of DMA buffers
 */
static void __dma_cache_wback_inv_ioc(phys_addr_t start, unsigned long sz) {}
static void __dma_cache_inv_ioc(phys_addr_t start, unsigned long sz) {}
static void __dma_cache_wback_ioc(phys_addr_t start, unsigned long sz) {}

/*
 * Exported DMA API
 */
void dma_cache_wback_inv(phys_addr_t start, unsigned long sz)
{
	__dma_cache_wback_inv(start, sz);
}
EXPORT_SYMBOL(dma_cache_wback_inv);

void dma_cache_inv(phys_addr_t start, unsigned long sz)
{
	__dma_cache_inv(start, sz);
}
EXPORT_SYMBOL(dma_cache_inv);

void dma_cache_wback(phys_addr_t start, unsigned long sz)
{
	__dma_cache_wback(start, sz);
}
EXPORT_SYMBOL(dma_cache_wback);

/*
 * This is API for making I/D Caches consistent when modifying
 * kernel code (loadable modules, kprobes, kgdb...)
 * This is called on insmod, with kernel virtual address for CODE of
 * the module. ARC cache maintenance ops require PHY address thus we
 * need to convert vmalloc addr to PHY addr
 */
void flush_icache_range(unsigned long kstart, unsigned long kend)
{
	unsigned int tot_sz;

	WARN(kstart < TASK_SIZE, "%s() can't handle user vaddr", __func__);

	/* Shortcut for bigger flush ranges.
	 * Here we don't care if this was kernel virtual or phy addr
	 */
	tot_sz = kend - kstart;
	if (tot_sz > PAGE_SIZE) {
		flush_cache_all();
		return;
	}

	/* Case: Kernel Phy addr (0x8000_0000 onwards) */
	if (likely(kstart > PAGE_OFFSET)) {
		/*
		 * The 2nd arg despite being paddr will be used to index icache
		 * This is OK since no alternate virtual mappings will exist
		 * given the callers for this case: kprobe/kgdb in built-in
		 * kernel code only.
		 */
		__sync_icache_dcache(kstart, kstart, kend - kstart);
		return;
	}

	/*
	 * Case: Kernel Vaddr (0x7000_0000 to 0x7fff_ffff)
	 * (1) ARC Cache Maintenance ops only take Phy addr, hence special
	 *     handling of kernel vaddr.
	 *
	 * (2) Despite @tot_sz being < PAGE_SIZE (bigger cases handled already),
	 *     it still needs to handle a 2 page scenario, where the range
	 *     straddles across 2 virtual pages and hence need for loop
	 */
	while (tot_sz > 0) {
		unsigned int off, sz;
		unsigned long phy, pfn;

		off = kstart % PAGE_SIZE;
		pfn = vmalloc_to_pfn((void *)kstart);
		phy = (pfn << PAGE_SHIFT) + off;
		sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
		__sync_icache_dcache(phy, kstart, sz);
		kstart += sz;
		tot_sz -= sz;
	}
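
	/*
	 * e.g. (illustrative): a 300 byte range starting 100 bytes before a
	 * page boundary is split into a 100 byte and a 200 byte chunk, each
	 * translated to its own phy addr via vmalloc_to_pfn().
	 */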
}
EXPORT_SYMBOL(flush_icache_range);

/*
 * General purpose helper to make I and D cache lines consistent.
 * @paddr is phy addr of region
 * @vaddr is typically user vaddr (breakpoint) or kernel vaddr (vmalloc)
 *    However in one instance, when called by kprobe (for a breakpt in
 *    builtin kernel code) @vaddr will be paddr only, meaning CDU operation
 *    will use a paddr to index the cache (despite VIPT). This is fine since a
 *    builtin kernel page will not have any virtual mappings.
 *    kprobe on loadable module will be kernel vaddr.
 */
void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len)
{
	__dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
	__ic_line_inv_vaddr(paddr, vaddr, len);
}

/* wrapper to compile time eliminate alignment checks in flush loop */
void __inv_icache_page(phys_addr_t paddr, unsigned long vaddr)
{
	__ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
}

/*
 * wrapper to clearout kernel or userspace mappings of a page
 * For kernel mappings @vaddr == @paddr
 */
void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr)
{
	__dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
}

noinline void flush_cache_all(void)
{
	unsigned long flags;

	local_irq_save(flags);

	__ic_entire_inv();
	__dc_entire_op(OP_FLUSH_N_INV);

	local_irq_restore(flags);

}

#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING

void flush_cache_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
		      unsigned long pfn)
{
	unsigned int paddr = pfn << PAGE_SHIFT;

	u_vaddr &= PAGE_MASK;

	__flush_dcache_page(paddr, u_vaddr);

	if (vma->vm_flags & VM_EXEC)
		__inv_icache_page(paddr, u_vaddr);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	flush_cache_all();
}

void flush_anon_page(struct vm_area_struct *vma, struct page *page,
		     unsigned long u_vaddr)
{
	/* TBD: do we really need to clear the kernel mapping */
	__flush_dcache_page(page_address(page), u_vaddr);
	__flush_dcache_page(page_address(page), page_address(page));

}

#endif

void copy_user_highpage(struct page *to, struct page *from,
			unsigned long u_vaddr, struct vm_area_struct *vma)
{
	void *kfrom = kmap_atomic(from);
	void *kto = kmap_atomic(to);
	int clean_src_k_mappings = 0;

	/*
	 * If SRC page was already mapped in userspace AND its U-mapping is
	 * not congruent with K-mapping, sync former to physical page so that
	 * K-mapping in memcpy below, sees the right data
	 *
	 * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
	 * equally valid for SRC page as well
	 *
	 * For !VIPT cache, all of this gets compiled out as
	 * addr_not_cache_congruent() is 0
	 */
	if (page_mapcount(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
		__flush_dcache_page((unsigned long)kfrom, u_vaddr);
		clean_src_k_mappings = 1;
	}

	copy_page(kto, kfrom);

	/*
	 * Mark DST page K-mapping as dirty for a later finalization by
	 * update_mmu_cache(). Although the finalization could have been done
	 * here as well (given that both vaddr/paddr are available).
	 * But update_mmu_cache() already has code to do that for other
	 * non copied user pages (e.g. read faults which wire in pagecache page
	 * directly).
	 */
	clear_bit(PG_dc_clean, &to->flags);

	/*
	 * if SRC was already usermapped and non-congruent to kernel mapping
	 * sync the kernel mapping back to physical page
	 */
	if (clean_src_k_mappings) {
		__flush_dcache_page((unsigned long)kfrom, (unsigned long)kfrom);
		set_bit(PG_dc_clean, &from->flags);
	} else {
		clear_bit(PG_dc_clean, &from->flags);
	}

	kunmap_atomic(kto);
	kunmap_atomic(kfrom);
}

void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
{
	clear_page(to);
	clear_bit(PG_dc_clean, &page->flags);
}


/**********************************************************************
 * Explicit Cache flush request from user space via syscall
 * Needed for JITs which generate code on the fly
 */
SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
{
	/* TBD: optimize this */
	flush_cache_all();
	return 0;
}
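
/*
 * Illustrative userspace call (hypothetical buffer names), e.g. from a JIT
 * right after emitting code into an executable buffer:
 *
 *	syscall(__NR_cacheflush, (uint32_t)code_buf, code_sz, 0);
 */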

noinline void arc_ioc_setup(void)
{
	write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000);
	write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, 0x11);
	write_aux_reg(ARC_REG_IO_COH_PARTIAL, 1);
	write_aux_reg(ARC_REG_IO_COH_ENABLE, 1);
}

void arc_cache_init(void)
{
	unsigned int __maybe_unused cpu = smp_processor_id();
	char str[256];

	printk(arc_cache_mumbojumbo(0, str, sizeof(str)));

	/*
	 * Only master CPU needs to execute rest of function:
	 *  - Assume SMP so all cores will have same cache config so
	 *    any geometry checks will be same for all
	 *  - IOC setup / dma callbacks only need to be setup once
	 */
	if (cpu)
		return;

	if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
		struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;

		if (!ic->line_len)
			panic("cache support enabled but non-existent cache\n");

		if (ic->line_len != L1_CACHE_BYTES)
			panic("ICache line [%d] != kernel Config [%d]",
			      ic->line_len, L1_CACHE_BYTES);

		/*
		 * In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
		 * pair to provide vaddr/paddr respectively, just as in MMU v3
		 */
		if (is_isa_arcv2() && ic->alias)
			_cache_line_loop_ic_fn = __cache_line_loop_v3;
		else
			_cache_line_loop_ic_fn = __cache_line_loop;
	}

	if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
		struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;

		if (!dc->line_len)
			panic("cache support enabled but non-existent cache\n");

		if (dc->line_len != L1_CACHE_BYTES)
			panic("DCache line [%d] != kernel Config [%d]",
			      dc->line_len, L1_CACHE_BYTES);

		/* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */
		if (is_isa_arcompact()) {
			int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
			int num_colors = dc->sz_k/dc->assoc/TO_KB(PAGE_SIZE);

			if (dc->alias) {
				if (!handled)
					panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
				if (CACHE_COLORS_NUM != num_colors)
					panic("CACHE_COLORS_NUM not optimized for config\n");
			} else if (!dc->alias && handled) {
				panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
			}
		}
	}

	/* Note that SLC disable not formally supported till HS 3.0 */
	if (is_isa_arcv2() && l2_line_sz && !slc_enable)
		arc_slc_disable();

	if (is_isa_arcv2() && ioc_enable)
		arc_ioc_setup();

	if (is_isa_arcv2() && ioc_enable) {
		__dma_cache_wback_inv = __dma_cache_wback_inv_ioc;
		__dma_cache_inv = __dma_cache_inv_ioc;
		__dma_cache_wback = __dma_cache_wback_ioc;
	} else if (is_isa_arcv2() && l2_line_sz && slc_enable) {
		__dma_cache_wback_inv = __dma_cache_wback_inv_slc;
		__dma_cache_inv = __dma_cache_inv_slc;
		__dma_cache_wback = __dma_cache_wback_slc;
	} else {
		__dma_cache_wback_inv = __dma_cache_wback_inv_l1;
		__dma_cache_inv = __dma_cache_inv_l1;
		__dma_cache_wback = __dma_cache_wback_l1;
	}
}