/*
 * ARC Cache Management
 *
 * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/cache.h>
#include <linux/mmu_context.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <asm/cachectl.h>
#include <asm/setup.h>

static int l2_line_sz;
int ioc_exists;

void (*_cache_line_loop_ic_fn)(unsigned long paddr, unsigned long vaddr,
			       unsigned long sz, const int cacheop);

void (*__dma_cache_wback_inv)(unsigned long start, unsigned long sz);
void (*__dma_cache_inv)(unsigned long start, unsigned long sz);
void (*__dma_cache_wback)(unsigned long start, unsigned long sz);

char *arc_cache_mumbojumbo(int c, char *buf, int len)
{
	int n = 0;
	struct cpuinfo_arc_cache *p;

#define PR_CACHE(p, cfg, str)						\
	if (!(p)->ver)							\
		n += scnprintf(buf + n, len - n, str"\t\t: N/A\n");	\
	else								\
		n += scnprintf(buf + n, len - n,			\
			str"\t\t: %uK, %dway/set, %uB Line, %s%s%s\n",	\
			(p)->sz_k, (p)->assoc, (p)->line_len,		\
			(p)->vipt ? "VIPT" : "PIPT",			\
			(p)->alias ? " aliasing" : "",			\
			IS_ENABLED(cfg) ? "" : " (not used)");
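
/*
 * e.g. for an assumed 32K, 2-way, 64B-line aliasing VIPT I-cache, PR_CACHE()
 * above would emit: "I-Cache\t\t: 32K, 2way/set, 64B Line, VIPT aliasing"
 */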
Vineet Guptaaf617422013-01-18 15:12:24 +053049
Vineet Guptada40ff42014-06-27 15:49:47 +053050 PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
51 PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");
Vineet Guptaaf617422013-01-18 15:12:24 +053052
Vineet Guptad1f317d2015-04-06 17:23:57 +053053 p = &cpuinfo_arc700[c].slc;
54 if (p->ver)
55 n += scnprintf(buf + n, len - n,
56 "SLC\t\t: %uK, %uB Line\n", p->sz_k, p->line_len);
57
Alexey Brodkinf2b0b252015-05-25 19:54:28 +030058 if (ioc_exists)
59 n += scnprintf(buf + n, len - n, "IOC\t\t: exists\n");
60
Vineet Guptaaf617422013-01-18 15:12:24 +053061 return buf;
62}
63
/*
 * Read the Cache Build Configuration Registers, decode them and save into
 * the cpuinfo structure for later use.
 * No validation is done here; we simply read/convert the BCRs.
 */
void read_decode_cache_bcr(void)
{
	struct cpuinfo_arc_cache *p_ic, *p_dc, *p_slc;
	unsigned int cpu = smp_processor_id();
	struct bcr_cache {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
#else
		unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
#endif
	} ibcr, dbcr;

	struct bcr_generic sbcr;

	struct bcr_slc_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:24, way:2, lsz:2, sz:4;
#else
		unsigned int sz:4, lsz:2, way:2, pad:24;
#endif
	} slc_cfg;

	struct bcr_clust_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:7, c:1, num_entries:8, num_cores:8, ver:8;
#else
		unsigned int ver:8, num_cores:8, num_entries:8, c:1, pad:7;
#endif
	} cbcr;

	p_ic = &cpuinfo_arc700[cpu].icache;
	READ_BCR(ARC_REG_IC_BCR, ibcr);

	if (!ibcr.ver)
		goto dc_chk;

	if (ibcr.ver <= 3) {
		BUG_ON(ibcr.config != 3);
		p_ic->assoc = 2;		/* Fixed to 2w set assoc */
	} else if (ibcr.ver >= 4) {
		p_ic->assoc = 1 << ibcr.config;	/* 1,2,4,8 */
	}

	p_ic->line_len = 8 << ibcr.line_len;
	p_ic->sz_k = 1 << (ibcr.sz - 1);
	p_ic->ver = ibcr.ver;
	p_ic->vipt = 1;
	p_ic->alias = p_ic->sz_k/p_ic->assoc/TO_KB(PAGE_SIZE) > 1;
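	/*
	 * e.g. a 32K, 2-way I-cache with 8K pages: way-size = 32K/2 = 16K
	 * spans 2 pages (32/2/8 = 2 > 1), so vaddr bits above PAGE_SHIFT
	 * take part in indexing and the cache can alias.
	 */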

dc_chk:
	p_dc = &cpuinfo_arc700[cpu].dcache;
	READ_BCR(ARC_REG_DC_BCR, dbcr);

	if (!dbcr.ver)
		goto slc_chk;

	if (dbcr.ver <= 3) {
		BUG_ON(dbcr.config != 2);
		p_dc->assoc = 4;		/* Fixed to 4w set assoc */
		p_dc->vipt = 1;
		p_dc->alias = p_dc->sz_k/p_dc->assoc/TO_KB(PAGE_SIZE) > 1;
	} else if (dbcr.ver >= 4) {
		p_dc->assoc = 1 << dbcr.config;	/* 1,2,4,8 */
		p_dc->vipt = 0;
		p_dc->alias = 0;		/* PIPT so can't VIPT alias */
	}

	p_dc->line_len = 16 << dbcr.line_len;
	p_dc->sz_k = 1 << (dbcr.sz - 1);
	p_dc->ver = dbcr.ver;

slc_chk:
	if (!is_isa_arcv2())
		return;

	p_slc = &cpuinfo_arc700[cpu].slc;
	READ_BCR(ARC_REG_SLC_BCR, sbcr);
	if (sbcr.ver) {
		READ_BCR(ARC_REG_SLC_CFG, slc_cfg);
		p_slc->ver = sbcr.ver;
		p_slc->sz_k = 128 << slc_cfg.sz;
		l2_line_sz = p_slc->line_len = (slc_cfg.lsz == 0) ? 128 : 64;
	}

	READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);
	if (cbcr.c)
		ioc_exists = 1;
}

/*
 * Line Operation on {I,D}-Cache
 */

#define OP_INV		0x1
#define OP_FLUSH	0x2
#define OP_FLUSH_N_INV	0x3
#define OP_INV_IC	0x4
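
/*
 * Note: OP_FLUSH_N_INV (0x3) == OP_INV (0x1) | OP_FLUSH (0x2), so the helpers
 * below can test the constituent actions with (op & OP_INV) / (op & OP_FLUSH).
 */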

/*
 * I-Cache Aliasing in ARC700 VIPT caches (MMU v1-v3)
 *
 * ARC VIPT I-cache uses vaddr to index into cache and paddr to match the tag.
 * The orig Cache Management Module "CDU" only required paddr to invalidate a
 * certain line since it sufficed as index in Non-Aliasing VIPT cache-geometry.
 * In fact for distinct V1, V2, P: all of {V1-P}, {V2-P}, {P-P} would end up
 * fetching the exact same line.
 *
 * However for larger caches (way-size > page-size), i.e. in an Aliasing
 * config, paddr alone cannot correctly index the cache.
 *
 * ------------------
 * MMU v1/v2 (Fixed Page Size 8k)
 * ------------------
 * The solution was to provide CDU with these additional vaddr bits. These
 * would be bits [x:13], where x depends on cache-geometry and 13 comes from
 * the standard page size of 8k.
 * H/w folks chose [17:13] to be a future-safe range, and moreover these 5 bits
 * of vaddr could easily be "stuffed" into the paddr as bits [4:0] since the
 * orig 5 bits of paddr were anyways ignored by CDU line ops, as they
 * represent the offset within the cache-line. The advantage of this "clumsy"
 * interface for the additional info was that no new reg was needed in the CDU
 * programming model.
 *
 * [17:13] represented the max num of bits passable; the actual bits needed
 * were fewer, based on the num-of-aliases possible:
 * -for 2 alias possibility, only bit 13 needed (32K cache)
 * -for 4 alias possibility, bits 14:13 needed (64K cache)
 *
 * ------------------
 * MMU v3
 * ------------------
 * This ver of MMU supports variable page sizes (1k-16k), although Linux will
 * only support 8k (default), 16k and 4k.
 * However from the hardware perspective, smaller page sizes aggravate
 * aliasing, meaning more vaddr bits are needed to disambiguate the
 * cache-line-op; the existing scheme of piggybacking won't work for certain
 * configurations. So two new registers, IC_PTAG and DC_PTAG, were introduced:
 * "tag" bits are provided in PTAG, index bits in the existing IVIL/IVDL/FLDL
 * regs.
 */

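/*
 * Concretely (8K pages, PAGE_SHIFT == 13), __cache_line_loop_v2() below
 * issues the command word
 *
 *	paddr | ((vaddr >> PAGE_SHIFT) & 0x1F)
 *
 * so vaddr[17:13] ride in paddr[4:0], the bits CDU would otherwise ignore
 * as the intra-line offset.
 */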
static inline
void __cache_line_loop_v2(unsigned long paddr, unsigned long vaddr,
			  unsigned long sz, const int op)
{
	unsigned int aux_cmd;
	int num_lines;
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);
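
	/*
	 * e.g. a 64 byte request at paddr 0x1003 (64B lines) becomes:
	 * sz = 64 + (0x1003 & 63) = 67, paddr = 0x1000, and
	 * num_lines = DIV_ROUND_UP(67, 64) = 2, covering lines 0x1000, 0x1040.
	 */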

	/* MMUv2 and before: paddr contains stuffed vaddr bits */
	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;

	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
	}
}

static inline
void __cache_line_loop_v3(unsigned long paddr, unsigned long vaddr,
			  unsigned long sz, const int op)
{
	unsigned int aux_cmd, aux_tag;
	int num_lines;
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
		aux_tag = ARC_REG_IC_PTAG;
	} else {
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
		aux_tag = ARC_REG_DC_PTAG;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}
	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/*
	 * MMUv3, cache ops require paddr in PTAG reg
	 * if V-P const for loop, PTAG can be written once outside loop
	 */
	if (full_page)
		write_aux_reg(aux_tag, paddr);

	while (num_lines-- > 0) {
		if (!full_page) {
			write_aux_reg(aux_tag, paddr);
			paddr += L1_CACHE_BYTES;
		}

		write_aux_reg(aux_cmd, vaddr);
		vaddr += L1_CACHE_BYTES;
	}
}

/*
 * In HS38x (MMU v4), although icache is VIPT, only paddr is needed for cache
 * maintenance ops (in IVIL reg), as long as icache doesn't alias.
 *
 * For an aliasing icache, vaddr is also needed (in IVIL), while paddr is
 * specified in PTAG (similar to MMU v3)
 */
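/*
 * Hence the loop below writes only paddr and leaves @vaddr unused; the
 * aliasing-icache case is instead steered to __cache_line_loop_v3() via
 * _cache_line_loop_ic_fn (set up in arc_cache_init()).
 */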
static inline
void __cache_line_loop_v4(unsigned long paddr, unsigned long vaddr,
			  unsigned long sz, const int cacheop)
{
	unsigned int aux_cmd;
	int num_lines;
	const int full_page_op = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (cacheop == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = cacheop & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page_op) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
	}
}

#if (CONFIG_ARC_MMU_VER < 3)
#define __cache_line_loop	__cache_line_loop_v2
#elif (CONFIG_ARC_MMU_VER == 3)
#define __cache_line_loop	__cache_line_loop_v3
#elif (CONFIG_ARC_MMU_VER > 3)
#define __cache_line_loop	__cache_line_loop_v4
#endif

#ifdef CONFIG_ARC_HAS_DCACHE

/***************************************************************
 * Machine specific helpers for Entire D-Cache or Per Line ops
 */

static inline void __before_dc_op(const int op)
{
	if (op == OP_FLUSH_N_INV) {
		/* Dcache provides 2 cmds: FLUSH or INV
		 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
		 * flush-n-inv is achieved by INV cmd but with IM=1
		 * So toggle INV sub-mode depending on op request and default
		 */
		const unsigned int ctl = ARC_REG_DC_CTRL;
		write_aux_reg(ctl, read_aux_reg(ctl) | DC_CTRL_INV_MODE_FLUSH);
	}
}

static inline void __after_dc_op(const int op)
{
	if (op & OP_FLUSH) {
		const unsigned int ctl = ARC_REG_DC_CTRL;
		unsigned int reg;

		/* flush / flush-n-inv both wait */
		while ((reg = read_aux_reg(ctl)) & DC_CTRL_FLUSH_STATUS)
			;

		/* Switch back to default Invalidate mode */
		if (op == OP_FLUSH_N_INV)
			write_aux_reg(ctl, reg & ~DC_CTRL_INV_MODE_FLUSH);
	}
}

/*
 * Operation on Entire D-Cache
 * @op = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV}
 * Note that constant propagation ensures all the checks are gone
 * in generated code
 */
static inline void __dc_entire_op(const int op)
{
	int aux;

	__before_dc_op(op);

	if (op & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		aux = ARC_REG_DC_IVDC;
	else
		aux = ARC_REG_DC_FLSH;

	write_aux_reg(aux, 0x1);

	__after_dc_op(op);
}

/* For kernel mappings cache operation: index is same as paddr */
#define __dc_line_op_k(p, sz, op)	__dc_line_op(p, p, sz, op)

/*
 * D-Cache Line ops: Per Line INV (discard or wback+discard) or FLUSH (wback)
 */
static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
				unsigned long sz, const int op)
{
	unsigned long flags;

	local_irq_save(flags);

	__before_dc_op(op);

	__cache_line_loop(paddr, vaddr, sz, op);

	__after_dc_op(op);

	local_irq_restore(flags);
}

#else

#define __dc_entire_op(op)
#define __dc_line_op(paddr, vaddr, sz, op)
#define __dc_line_op_k(paddr, sz, op)

#endif /* CONFIG_ARC_HAS_DCACHE */

#ifdef CONFIG_ARC_HAS_ICACHE

static inline void __ic_entire_inv(void)
{
	write_aux_reg(ARC_REG_IC_IVIC, 1);
	read_aux_reg(ARC_REG_IC_CTRL);	/* blocks */
}

static inline void
__ic_line_inv_vaddr_local(unsigned long paddr, unsigned long vaddr,
			  unsigned long sz)
{
	unsigned long flags;

	local_irq_save(flags);
	(*_cache_line_loop_ic_fn)(paddr, vaddr, sz, OP_INV_IC);
	local_irq_restore(flags);
}

#ifndef CONFIG_SMP

#define __ic_line_inv_vaddr(p, v, s)	__ic_line_inv_vaddr_local(p, v, s)

#else

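/*
 * The I-cache aux regs operate on the local core's I$ only, so each core
 * must run the invalidate itself: hence the on_each_cpu() cross-call below.
 */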
struct ic_inv_args {
	unsigned long paddr, vaddr;
	int sz;
};

static void __ic_line_inv_vaddr_helper(void *info)
{
	struct ic_inv_args *ic_inv = info;

	__ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
}

static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
				unsigned long sz)
{
	struct ic_inv_args ic_inv = {
		.paddr = paddr,
		.vaddr = vaddr,
		.sz = sz
	};

	on_each_cpu(__ic_line_inv_vaddr_helper, &ic_inv, 1);
}

#endif /* CONFIG_SMP */

#else /* !CONFIG_ARC_HAS_ICACHE */

#define __ic_entire_inv()
#define __ic_line_inv_vaddr(pstart, vstart, sz)

#endif /* CONFIG_ARC_HAS_ICACHE */

noinline void slc_op(unsigned long paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2
	/*
	 * SLC is shared between all cores and concurrent aux operations from
	 * multiple cores need to be serialized using a spinlock
	 * A concurrent operation can be silently ignored and/or the old/new
	 * operation can remain incomplete forever (lockup in SLC_CTRL_BUSY loop
	 * below)
	 */
	static DEFINE_SPINLOCK(lock);
	unsigned long flags;
	unsigned int ctrl;

	spin_lock_irqsave(&lock, flags);

	/*
	 * The Region Flush operation is specified by CTRL.RGN_OP[11..9]
	 *  - b'000 (default) is Flush,
	 *  - b'001 is Invalidate if CTRL.IM == 0
	 *  - b'001 is Flush-n-Invalidate if CTRL.IM == 1
	 */
	ctrl = read_aux_reg(ARC_REG_SLC_CTRL);

	/* Don't rely on default value of IM bit */
	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	if (op & OP_INV)
		ctrl |= SLC_CTRL_RGN_OP_INV;	/* Inv or flush-n-inv */
	else
		ctrl &= ~SLC_CTRL_RGN_OP_INV;
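
	/*
	 * Net effect for the ops used here:
	 *	OP_INV		: IM=0, RGN_OP=b'001 - discard only
	 *	OP_FLUSH	: IM=1, RGN_OP=b'000 - writeback (IM is moot)
	 *	OP_FLUSH_N_INV	: IM=1, RGN_OP=b'001 - writeback, then discard
	 */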

	write_aux_reg(ARC_REG_SLC_CTRL, ctrl);

	/*
	 * Lower bits are ignored, no need to clip
	 * END needs to be setup before START (latter triggers the operation)
	 * END can't be same as START, so add (l2_line_sz - 1) to sz
	 */
	write_aux_reg(ARC_REG_SLC_RGN_END, (paddr + sz + l2_line_sz - 1));
	write_aux_reg(ARC_REG_SLC_RGN_START, paddr);

	while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);

	spin_unlock_irqrestore(&lock, flags);
#endif
}

/***********************************************************
 * Exported APIs
 */

/*
 * Handle cache congruency of kernel and userspace mappings of a page when
 * kernel writes-to/reads-from it
 *
 * The idea is to defer flushing of kernel mapping after a WRITE, possible if:
 *  -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
 *  -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
 *  -In SMP, if hardware caches are coherent
 *
 * There's a corollary case, where kernel READs from a userspace mapped page.
 * If the U-mapping is not congruent to K-mapping, the former needs flushing.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	if (!cache_is_vipt_aliasing()) {
		clear_bit(PG_dc_clean, &page->flags);
		return;
	}

	/* don't handle anon pages here */
	mapping = page_mapping(page);
	if (!mapping)
		return;

	/*
	 * pagecache page, file not yet mapped to userspace
	 * Make a note that K-mapping is dirty
	 */
	if (!mapping_mapped(mapping)) {
		clear_bit(PG_dc_clean, &page->flags);
	} else if (page_mapped(page)) {

		/* kernel reading from page with U-mapping */
		unsigned long paddr = (unsigned long)page_address(page);
		unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;

		if (addr_not_cache_congruent(paddr, vaddr))
			__flush_dcache_page(paddr, vaddr);
	}
}
EXPORT_SYMBOL(flush_dcache_page);

/*
 * DMA ops for systems with L1 cache only
 * Make memory coherent with L1 cache by flushing/invalidating L1 lines
 */
static void __dma_cache_wback_inv_l1(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
}

static void __dma_cache_inv_l1(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
}

static void __dma_cache_wback_l1(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
}

/*
 * DMA ops for systems with both L1 and L2 caches, but without IOC
 * Both L1 and L2 lines need to be explicitly flushed/invalidated
 */
static void __dma_cache_wback_inv_slc(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
	slc_op(start, sz, OP_FLUSH_N_INV);
}

static void __dma_cache_inv_slc(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
	slc_op(start, sz, OP_INV);
}

static void __dma_cache_wback_slc(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
	slc_op(start, sz, OP_FLUSH);
}

/*
 * DMA ops for systems with IOC
 * IOC hardware snoops all DMA traffic keeping the caches consistent with
 * memory - eliding the need for any explicit cache maintenance of DMA buffers
 */
static void __dma_cache_wback_inv_ioc(unsigned long start, unsigned long sz) {}
static void __dma_cache_inv_ioc(unsigned long start, unsigned long sz) {}
static void __dma_cache_wback_ioc(unsigned long start, unsigned long sz) {}

/*
 * Exported DMA API
 */
void dma_cache_wback_inv(unsigned long start, unsigned long sz)
{
	__dma_cache_wback_inv(start, sz);
}
EXPORT_SYMBOL(dma_cache_wback_inv);

void dma_cache_inv(unsigned long start, unsigned long sz)
{
	__dma_cache_inv(start, sz);
}
EXPORT_SYMBOL(dma_cache_inv);

void dma_cache_wback(unsigned long start, unsigned long sz)
{
	__dma_cache_wback(start, sz);
}
EXPORT_SYMBOL(dma_cache_wback);
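
/*
 * Hypothetical driver-side usage sketch (buf_addr/sz are illustrative, not
 * from this file): write back CPU-dirtied lines before a device reads the
 * buffer, and invalidate stale lines before the CPU reads device-written
 * data:
 *
 *	dma_cache_wback(buf_addr, sz);		// CPU -> device
 *	...start DMA, wait for completion...
 *	dma_cache_inv(buf_addr, sz);		// device -> CPU
 *
 * With IOC present these calls become no-ops (see __dma_cache_*_ioc() above).
 */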

/*
 * This is the API for making I/D caches consistent when modifying
 * kernel code (loadable modules, kprobes, kgdb...)
 * This is called on insmod, with kernel virtual address for CODE of
 * the module. ARC cache maintenance ops require PHY address, thus we
 * need to convert vmalloc addr to PHY addr
 */
void flush_icache_range(unsigned long kstart, unsigned long kend)
{
	unsigned int tot_sz;

	WARN(kstart < TASK_SIZE, "%s() can't handle user vaddr", __func__);

	/* Shortcut for bigger flush ranges.
	 * Here we don't care if this was kernel virtual or phy addr
	 */
	tot_sz = kend - kstart;
	if (tot_sz > PAGE_SIZE) {
		flush_cache_all();
		return;
	}

	/* Case: Kernel Phy addr (0x8000_0000 onwards) */
	if (likely(kstart > PAGE_OFFSET)) {
		/*
		 * The 2nd arg despite being paddr will be used to index icache
		 * This is OK since no alternate virtual mappings will exist
		 * given the callers for this case: kprobe/kgdb in built-in
		 * kernel code only.
		 */
		__sync_icache_dcache(kstart, kstart, kend - kstart);
		return;
	}

	/*
	 * Case: Kernel Vaddr (0x7000_0000 to 0x7fff_ffff)
	 * (1) ARC Cache Maintenance ops only take Phy addr, hence special
	 *     handling of kernel vaddr.
	 *
	 * (2) Despite @tot_sz being < PAGE_SIZE (bigger cases handled already),
	 *     it still needs to handle a 2 page scenario, where the range
	 *     straddles across 2 virtual pages and hence the need for a loop
	 */
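	/*
	 * e.g. with 8K pages, kstart = 0x7000_1f00 and tot_sz = 0x200:
	 * 1st pass handles 0x100 bytes (off = 0x1f00, PAGE_SIZE - off =
	 * 0x100), 2nd pass handles the remaining 0x100 from the next page.
	 */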
	while (tot_sz > 0) {
		unsigned int off, sz;
		unsigned long phy, pfn;

		off = kstart % PAGE_SIZE;
		pfn = vmalloc_to_pfn((void *)kstart);
		phy = (pfn << PAGE_SHIFT) + off;
		sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
		__sync_icache_dcache(phy, kstart, sz);
		kstart += sz;
		tot_sz -= sz;
	}
}
EXPORT_SYMBOL(flush_icache_range);

/*
 * General purpose helper to make I and D cache lines consistent.
 * @paddr is phy addr of region
 * @vaddr is typically user vaddr (breakpoint) or kernel vaddr (vmalloc)
 *    However in one instance, when called by kprobe (for a breakpt in
 *    builtin kernel code) @vaddr will be paddr only, meaning CDU operation will
 *    use a paddr to index the cache (despite VIPT). This is fine since a
 *    builtin kernel page will not have any virtual mappings.
 *    kprobe on loadable module will be kernel vaddr.
 */
void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len)
{
	__dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
	__ic_line_inv_vaddr(paddr, vaddr, len);
}

/* wrapper to compile time eliminate alignment checks in flush loop */
void __inv_icache_page(unsigned long paddr, unsigned long vaddr)
{
	__ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
}

/*
 * wrapper to clear out kernel or userspace mappings of a page
 * For kernel mappings @vaddr == @paddr
 */
void __flush_dcache_page(unsigned long paddr, unsigned long vaddr)
{
	__dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
}

noinline void flush_cache_all(void)
{
	unsigned long flags;

	local_irq_save(flags);

	__ic_entire_inv();
	__dc_entire_op(OP_FLUSH_N_INV);

	local_irq_restore(flags);
}

#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING

void flush_cache_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
		      unsigned long pfn)
{
	unsigned int paddr = pfn << PAGE_SHIFT;

	u_vaddr &= PAGE_MASK;

	__flush_dcache_page(paddr, u_vaddr);

	if (vma->vm_flags & VM_EXEC)
		__inv_icache_page(paddr, u_vaddr);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	flush_cache_all();
}

void flush_anon_page(struct vm_area_struct *vma, struct page *page,
		     unsigned long u_vaddr)
{
	/* TBD: do we really need to clear the kernel mapping? */
	__flush_dcache_page((unsigned long)page_address(page), u_vaddr);
	__flush_dcache_page((unsigned long)page_address(page),
			    (unsigned long)page_address(page));
}

#endif

void copy_user_highpage(struct page *to, struct page *from,
			unsigned long u_vaddr, struct vm_area_struct *vma)
{
	unsigned long kfrom = (unsigned long)page_address(from);
	unsigned long kto = (unsigned long)page_address(to);
	int clean_src_k_mappings = 0;

	/*
	 * If SRC page was already mapped in userspace AND its U-mapping is
	 * not congruent with K-mapping, sync former to physical page so that
	 * K-mapping in memcpy below sees the right data
	 *
	 * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
	 * equally valid for SRC page as well
	 */
	if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
		__flush_dcache_page(kfrom, u_vaddr);
		clean_src_k_mappings = 1;
	}

	copy_page((void *)kto, (void *)kfrom);

	/*
	 * Mark DST page K-mapping as dirty for a later finalization by
	 * update_mmu_cache(). Although the finalization could have been done
	 * here as well (given that both vaddr/paddr are available).
	 * But update_mmu_cache() already has code to do that for other
	 * non-copied user pages (e.g. read faults which wire in pagecache page
	 * directly).
	 */
	clear_bit(PG_dc_clean, &to->flags);

	/*
	 * if SRC was already usermapped and non-congruent to kernel mapping
	 * sync the kernel mapping back to physical page
	 */
	if (clean_src_k_mappings) {
		__flush_dcache_page(kfrom, kfrom);
		set_bit(PG_dc_clean, &from->flags);
	} else {
		clear_bit(PG_dc_clean, &from->flags);
	}
}

void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
{
	clear_page(to);
	clear_bit(PG_dc_clean, &page->flags);
}

/**********************************************************************
 * Explicit Cache flush request from user space via syscall
 * Needed for JITs which generate code on the fly
 */
SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
{
	/* TBD: optimize this */
	flush_cache_all();
	return 0;
}

void arc_cache_init(void)
{
	unsigned int __maybe_unused cpu = smp_processor_id();
	char str[256];

	printk(arc_cache_mumbojumbo(0, str, sizeof(str)));

	if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
		struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;

		if (!ic->ver)
			panic("cache support enabled but non-existent cache\n");

		if (ic->line_len != L1_CACHE_BYTES)
			panic("ICache line [%d] != kernel Config [%d]",
			      ic->line_len, L1_CACHE_BYTES);

		if (ic->ver != CONFIG_ARC_MMU_VER)
			panic("Cache ver [%d] doesn't match MMU ver [%d]\n",
			      ic->ver, CONFIG_ARC_MMU_VER);

		/*
		 * In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
		 * pair to provide vaddr/paddr respectively, just as in MMU v3
		 */
		if (is_isa_arcv2() && ic->alias)
			_cache_line_loop_ic_fn = __cache_line_loop_v3;
		else
			_cache_line_loop_ic_fn = __cache_line_loop;
	}

	if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
		struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;

		if (!dc->ver)
			panic("cache support enabled but non-existent cache\n");

		if (dc->line_len != L1_CACHE_BYTES)
			panic("DCache line [%d] != kernel Config [%d]",
			      dc->line_len, L1_CACHE_BYTES);

		/* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */
		if (is_isa_arcompact()) {
			int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);

			if (dc->alias && !handled)
				panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
			else if (!dc->alias && handled)
				panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
		}
	}

	if (is_isa_arcv2() && ioc_exists) {
		/* IO coherency base - 0x8z */
		write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000);
		/* IO coherency aperture size - 512MB: 0x8z-0xAz */
		write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, 0x11);
		/* Enable partial writes */
		write_aux_reg(ARC_REG_IO_COH_PARTIAL, 1);
		/* Enable IO coherency */
		write_aux_reg(ARC_REG_IO_COH_ENABLE, 1);

		__dma_cache_wback_inv = __dma_cache_wback_inv_ioc;
		__dma_cache_inv = __dma_cache_inv_ioc;
		__dma_cache_wback = __dma_cache_wback_ioc;
	} else if (is_isa_arcv2() && l2_line_sz) {
		__dma_cache_wback_inv = __dma_cache_wback_inv_slc;
		__dma_cache_inv = __dma_cache_inv_slc;
		__dma_cache_wback = __dma_cache_wback_slc;
	} else {
		__dma_cache_wback_inv = __dma_cache_wback_inv_l1;
		__dma_cache_inv = __dma_cache_inv_l1;
		__dma_cache_wback = __dma_cache_wback_l1;
	}
}