// SPDX-License-Identifier: GPL-2.0-only
/*
 * ARC Cache Management
 *
 * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/cache.h>
#include <linux/mmu_context.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <asm/cachectl.h>
#include <asm/setup.h>

#ifdef CONFIG_ISA_ARCV2
#define USE_RGN_FLSH	1
#endif

static int l2_line_sz;
static int ioc_exists;
int slc_enable = 1, ioc_enable = 1;
unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
unsigned long perip_end = 0xFFFFFFFF; /* legacy value */

void (*_cache_line_loop_ic_fn)(phys_addr_t paddr, unsigned long vaddr,
			       unsigned long sz, const int op, const int full_page);

void (*__dma_cache_wback_inv)(phys_addr_t start, unsigned long sz);
void (*__dma_cache_inv)(phys_addr_t start, unsigned long sz);
void (*__dma_cache_wback)(phys_addr_t start, unsigned long sz);

char *arc_cache_mumbojumbo(int c, char *buf, int len)
{
	int n = 0;
	struct cpuinfo_arc_cache *p;

#define PR_CACHE(p, cfg, str)						\
	if (!(p)->line_len)						\
		n += scnprintf(buf + n, len - n, str"\t\t: N/A\n");	\
	else								\
		n += scnprintf(buf + n, len - n,			\
			str"\t\t: %uK, %dway/set, %uB Line, %s%s%s\n",	\
			(p)->sz_k, (p)->assoc, (p)->line_len,		\
			(p)->vipt ? "VIPT" : "PIPT",			\
			(p)->alias ? " aliasing" : "",			\
			IS_USED_CFG(cfg));

	PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
	PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");

	p = &cpuinfo_arc700[c].slc;
	if (p->line_len)
		n += scnprintf(buf + n, len - n,
			       "SLC\t\t: %uK, %uB Line%s\n",
			       p->sz_k, p->line_len, IS_USED_RUN(slc_enable));

	n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n",
		       perip_base,
		       IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency (per-device) "));

	return buf;
}
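
/*
 * Illustrative usage of the above (a sketch mirroring the boot-time call in
 * arc_cache_init() further below); the summary might read, for example:
 *   I-Cache		: 32K, 4way/set, 64B Line, VIPT
 * The values shown are hypothetical, not hardware defaults.
 */
#if 0	/* example only, not built */
static void example_print_cache_info(void)
{
	char str[256];

	pr_info("%s", arc_cache_mumbojumbo(0, str, sizeof(str)));
}
#endif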

/*
 * Read the Cache Build Configuration Registers, Decode them and save into
 * the cpuinfo structure for later use.
 * No Validation done here, simply read/convert the BCRs
 */
static void read_decode_cache_bcr_arcv2(int cpu)
{
	struct cpuinfo_arc_cache *p_slc = &cpuinfo_arc700[cpu].slc;
	struct bcr_generic sbcr;

	struct bcr_slc_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:24, way:2, lsz:2, sz:4;
#else
		unsigned int sz:4, lsz:2, way:2, pad:24;
#endif
	} slc_cfg;

	struct bcr_clust_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:7, c:1, num_entries:8, num_cores:8, ver:8;
#else
		unsigned int ver:8, num_cores:8, num_entries:8, c:1, pad:7;
#endif
	} cbcr;

	struct bcr_volatile {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int start:4, limit:4, pad:22, order:1, disable:1;
#else
		unsigned int disable:1, order:1, pad:22, limit:4, start:4;
#endif
	} vol;

	READ_BCR(ARC_REG_SLC_BCR, sbcr);
	if (sbcr.ver) {
		READ_BCR(ARC_REG_SLC_CFG, slc_cfg);
		p_slc->sz_k = 128 << slc_cfg.sz;
		l2_line_sz = p_slc->line_len = (slc_cfg.lsz == 0) ? 128 : 64;
	}

	READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);
	if (cbcr.c) {
		ioc_exists = 1;

		/*
		 * As of today we don't support both IOC and ZONE_HIGHMEM enabled
		 * simultaneously. This happens because as of today the IOC aperture
		 * covers only ZONE_NORMAL (low mem) and any dma transactions outside
		 * this region won't be HW coherent.
		 * If we want to use both IOC and ZONE_HIGHMEM we can use
		 * bounce_buffer to handle dma transactions to HIGHMEM.
		 * Also it is possible to modify dma_direct cache ops or increase IOC
		 * aperture size if we are planning to use HIGHMEM without PAE.
		 */
		if (IS_ENABLED(CONFIG_HIGHMEM) || is_pae40_enabled())
			ioc_enable = 0;
	} else {
		ioc_enable = 0;
	}

	/* HS 2.0 didn't have AUX_VOL */
	if (cpuinfo_arc700[cpu].core.family > 0x51) {
		READ_BCR(AUX_VOL, vol);
		perip_base = vol.start << 28;
		/* HS 3.0 has limit and strict-ordering fields */
		if (cpuinfo_arc700[cpu].core.family > 0x52)
			perip_end = (vol.limit << 28) - 1;
	}
}

void read_decode_cache_bcr(void)
{
	struct cpuinfo_arc_cache *p_ic, *p_dc;
	unsigned int cpu = smp_processor_id();
	struct bcr_cache {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
#else
		unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
#endif
	} ibcr, dbcr;

	p_ic = &cpuinfo_arc700[cpu].icache;
	READ_BCR(ARC_REG_IC_BCR, ibcr);

	if (!ibcr.ver)
		goto dc_chk;

	if (ibcr.ver <= 3) {
		BUG_ON(ibcr.config != 3);
		p_ic->assoc = 2;		/* Fixed to 2w set assoc */
	} else if (ibcr.ver >= 4) {
		p_ic->assoc = 1 << ibcr.config;	/* 1,2,4,8 */
	}

	p_ic->line_len = 8 << ibcr.line_len;
	p_ic->sz_k = 1 << (ibcr.sz - 1);
	p_ic->vipt = 1;
	p_ic->alias = p_ic->sz_k/p_ic->assoc/TO_KB(PAGE_SIZE) > 1;

dc_chk:
	p_dc = &cpuinfo_arc700[cpu].dcache;
	READ_BCR(ARC_REG_DC_BCR, dbcr);

	if (!dbcr.ver)
		goto slc_chk;

	if (dbcr.ver <= 3) {
		BUG_ON(dbcr.config != 2);
		p_dc->assoc = 4;		/* Fixed to 4w set assoc */
		p_dc->vipt = 1;
		p_dc->alias = p_dc->sz_k/p_dc->assoc/TO_KB(PAGE_SIZE) > 1;
	} else if (dbcr.ver >= 4) {
		p_dc->assoc = 1 << dbcr.config;	/* 1,2,4,8 */
		p_dc->vipt = 0;
		p_dc->alias = 0;		/* PIPT so can't VIPT alias */
	}

	p_dc->line_len = 16 << dbcr.line_len;
	p_dc->sz_k = 1 << (dbcr.sz - 1);

slc_chk:
	if (is_isa_arcv2())
		read_decode_cache_bcr_arcv2(cpu);
}
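
/*
 * Worked example of the decode above, with assumed (not default) BCR fields:
 * ibcr.ver = 4, config = 2, sz = 6, line_len = 3 describes a 32K, 4-way,
 * 64B-line VIPT I-cache; with 8K pages, 32/4/8 = 1 page per way => no alias.
 */
#if 0	/* example only, not built */
static void example_ibcr_decode(void)
{
	unsigned int assoc    = 1 << 2;		/* config 2   -> 4 way	*/
	unsigned int line_len = 8 << 3;		/* line_len 3 -> 64B	*/
	unsigned int sz_k     = 1 << (6 - 1);	/* sz 6       -> 32K	*/
	int alias = sz_k / assoc / TO_KB(PAGE_SIZE) > 1;	/* 0 */

	(void)line_len;
	(void)alias;
}
#endif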

/*
 * Line Operation on {I,D}-Cache
 */

#define OP_INV		0x1
#define OP_FLUSH	0x2
#define OP_FLUSH_N_INV	0x3
#define OP_INV_IC	0x4

/*
 * I-Cache Aliasing in ARC700 VIPT caches (MMU v1-v3)
 *
 * ARC VIPT I-cache uses vaddr to index into cache and paddr to match the tag.
 * The orig Cache Management Module "CDU" only required paddr to invalidate a
 * certain line since it sufficed as index in Non-Aliasing VIPT cache-geometry.
 * In fact for distinct V1,V2,P: all of {V1-P},{V2-P},{P-P} would end up fetching
 * the exact same line.
 *
 * However for larger Caches (way-size > page-size) - i.e. in Aliasing config,
 * paddr alone could not be used to correctly index the cache.
 *
 * ------------------
 * MMU v1/v2 (Fixed Page Size 8k)
 * ------------------
 * The solution was to provide CDU with these additional vaddr bits. These
 * would be bits [x:13], x would depend on cache-geometry, 13 comes from
 * standard page size of 8k.
 * H/w folks chose [17:13] to be a future safe range, and more so these 5 bits
 * of vaddr could easily be "stuffed" in the paddr as bits [4:0] since the
 * orig 5 bits of paddr were anyway ignored by CDU line ops, as they
 * represent the offset within cache-line. The advantage of using this "clumsy"
 * interface for additional info was that no new reg was needed in the CDU
 * programming model.
 *
 * 17:13 represented the max num of bits passable, actual bits needed were
 * fewer, based on the num-of-aliases possible.
 * -for 2 alias possibility, only bit 13 needed (32K cache)
 * -for 4 alias possibility, bits 14:13 needed (64K cache)
 *
 * ------------------
 * MMU v3
 * ------------------
 * This ver of MMU supports variable page sizes (1k-16k): although Linux will
 * only support 8k (default), 16k and 4k.
 * However from hardware perspective, smaller page sizes aggravate aliasing
 * meaning more vaddr bits needed to disambiguate the cache-line-op ;
 * the existing scheme of piggybacking won't work for certain configurations.
 * Two new registers IC_PTAG and DC_PTAG introduced.
 * "tag" bits are provided in PTAG, index bits in existing IVIL/IVDL/FLDL regs
 */

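/*
 * A small numeric sketch of the bit-stuffing described above, assuming a
 * 64K/4-way I-cache with 8K pages (so vaddr bits [14:13] disambiguate the
 * aliases): those bits ride along in paddr[4:0], which the CDU line ops
 * ignore as line offset anyway. Addresses are made up for illustration.
 */
#if 0	/* example only, not built */
static void example_vaddr_stuffing(void)
{
	unsigned long vaddr = 0x70006000UL;
	phys_addr_t paddr = 0x80004000UL;

	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;	/* 0x70006000 >> 13 & 0x1F = 0x3 */
	(void)paddr;
}
#endif
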
static inline
void __cache_line_loop_v2(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int op, const int full_page)
{
	unsigned int aux_cmd;
	int num_lines;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/* MMUv2 and before: paddr contains stuffed vaddr bits */
	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;

	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
	}
}
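
/*
 * Worked example of the floor/ceil fixup above, assuming 64B lines: a request
 * of paddr = 0x8000_0013, sz = 0x30 grows by the leading misalignment (0x13)
 * and aligns paddr down, so DIV_ROUND_UP(0x43, 64) = 2 lines get the op -
 * covering every byte the caller asked for plus the alignment slack.
 */
#if 0	/* example only, not built */
static void example_line_floor_ceil(void)
{
	phys_addr_t paddr = 0x80000013UL;
	unsigned long sz = 0x30;
	int num_lines;

	sz += paddr & ~CACHE_LINE_MASK;			/* 0x30 + 0x13 = 0x43 */
	paddr &= CACHE_LINE_MASK;			/* 0x80000000 */
	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);	/* 2 with 64B lines */
	(void)num_lines;
}
#endif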

/*
 * For ARC700 MMUv3 I-cache and D-cache flushes
 *  - ARC700 programming model requires paddr and vaddr be passed in separate
 *    AUX registers (*_IV*L and *_PTAG respectively) irrespective of whether the
 *    caches actually alias or not.
 *  - For HS38, only the aliasing I-cache configuration uses the PTAG reg
 *    (non aliasing I-cache version doesn't; while D-cache can't possibly alias)
 */
static inline
void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int op, const int full_page)
{
	unsigned int aux_cmd, aux_tag;
	int num_lines;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
		aux_tag = ARC_REG_IC_PTAG;
	} else {
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
		aux_tag = ARC_REG_DC_PTAG;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}
	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/*
	 * MMUv3, cache ops require paddr in PTAG reg
	 * if V-P const for loop, PTAG can be written once outside loop
	 */
	if (full_page)
		write_aux_reg(aux_tag, paddr);

	/*
	 * This is technically for MMU v4, using the MMU v3 programming model
	 * Special work for HS38 aliasing I-cache configuration with PAE40
	 *   - upper 8 bits of paddr need to be written into PTAG_HI
	 *   - (and needs to be written before the lower 32 bits)
	 * Note that PTAG_HI is hoisted outside the line loop
	 */
	if (is_pae40_enabled() && op == OP_INV_IC)
		write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);

	while (num_lines-- > 0) {
		if (!full_page) {
			write_aux_reg(aux_tag, paddr);
			paddr += L1_CACHE_BYTES;
		}

		write_aux_reg(aux_cmd, vaddr);
		vaddr += L1_CACHE_BYTES;
	}
}

#ifndef USE_RGN_FLSH

/*
 * In HS38x (MMU v4), I-cache is VIPT (can alias), D-cache is PIPT
 * Here's how cache ops are implemented
 *
 *  - D-cache: only paddr needed (in DC_IVDL/DC_FLDL)
 *  - I-cache Non Aliasing: Despite VIPT, only paddr needed (in IC_IVIL)
 *  - I-cache Aliasing: Both vaddr and paddr needed (in IC_IVIL, IC_PTAG
 *    respectively, similar to MMU v3 programming model, hence
 *    __cache_line_loop_v3() is used)
 *
 * If PAE40 is enabled, independent of aliasing considerations, the higher bits
 * need to be written into PTAG_HI
 */
static inline
void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int op, const int full_page)
{
	unsigned int aux_cmd;
	int num_lines;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/*
	 * For HS38 PAE40 configuration
	 *   - upper 8 bits of paddr need to be written into PTAG_HI
	 *   - (and needs to be written before the lower 32 bits)
	 */
	if (is_pae40_enabled()) {
		if (op == OP_INV_IC)
			/*
			 * Non aliasing I-cache in HS38,
			 * aliasing I-cache handled in __cache_line_loop_v3()
			 */
			write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
		else
			write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32);
	}

	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
	}
}

#else

/*
 * Optimized flush operation which takes a region as opposed to iterating per line
 */
static inline
void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int op, const int full_page)
{
	unsigned int s, e;

	/* Only for Non aliasing I-cache in HS38 */
	if (op == OP_INV_IC) {
		s = ARC_REG_IC_IVIR;
		e = ARC_REG_IC_ENDR;
	} else {
		s = ARC_REG_DC_STARTR;
		e = ARC_REG_DC_ENDR;
	}

	if (!full_page) {
		/* for any leading gap between @paddr and start of cache line */
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;

		/*
		 * account for any trailing gap to end of cache line
		 * this is equivalent to DIV_ROUND_UP() in line ops above
		 */
		sz += L1_CACHE_BYTES - 1;
	}

	if (is_pae40_enabled()) {
		/* TBD: check if crossing 4TB boundary */
		if (op == OP_INV_IC)
			write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
		else
			write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32);
	}

	/* ENDR needs to be set ahead of START */
	write_aux_reg(e, paddr + sz);	/* ENDR is exclusive */
	write_aux_reg(s, paddr);

	/* caller waits on DC_CTRL.FS */
}
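
/*
 * Worked example of the region setup above, assuming 64B lines: for
 * paddr = 0x8000_0004, sz = 0x40, the leading gap (4) plus trailing pad (63)
 * give ENDR = 0x8000_0083 (exclusive), so both lines the request touches
 * (0x8000_0000 and 0x8000_0040) fall inside [STARTR, ENDR).
 */
#if 0	/* example only, not built */
static void example_rgn_flush_bounds(void)
{
	phys_addr_t paddr = 0x80000004UL;
	unsigned long sz = 0x40;

	sz += paddr & ~CACHE_LINE_MASK;	/* 0x40 + 0x04 = 0x44 */
	paddr &= CACHE_LINE_MASK;	/* 0x80000000 */
	sz += L1_CACHE_BYTES - 1;	/* 0x44 + 0x3F = 0x83 */
	/* ENDR = paddr + sz = 0x80000083, STARTR = paddr */
	(void)sz;
}
#endif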

#endif

#if (CONFIG_ARC_MMU_VER < 3)
#define __cache_line_loop	__cache_line_loop_v2
#elif (CONFIG_ARC_MMU_VER == 3)
#define __cache_line_loop	__cache_line_loop_v3
#elif (CONFIG_ARC_MMU_VER > 3)
#define __cache_line_loop	__cache_line_loop_v4
#endif

#ifdef CONFIG_ARC_HAS_DCACHE

/***************************************************************
 * Machine specific helpers for Entire D-Cache or Per Line ops
 */

#ifndef USE_RGN_FLSH
/*
 * this version avoids extra read/write of DC_CTRL for flush or invalidate ops
 * in the non region flush regime (such as for ARCompact)
 */
static inline void __before_dc_op(const int op)
{
	if (op == OP_FLUSH_N_INV) {
		/* Dcache provides 2 cmd: FLUSH or INV
		 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
		 * flush-n-inv is achieved by INV cmd but with IM=1
		 * So toggle INV sub-mode depending on op request and default
		 */
		const unsigned int ctl = ARC_REG_DC_CTRL;
		write_aux_reg(ctl, read_aux_reg(ctl) | DC_CTRL_INV_MODE_FLUSH);
	}
}

#else

static inline void __before_dc_op(const int op)
{
	const unsigned int ctl = ARC_REG_DC_CTRL;
	unsigned int val = read_aux_reg(ctl);

	if (op == OP_FLUSH_N_INV) {
		val |= DC_CTRL_INV_MODE_FLUSH;
	}

	if (op != OP_INV_IC) {
		/*
		 * Flush / Invalidate is provided by DC_CTRL.RNG_OP 0 or 1
		 * combined Flush-n-invalidate uses DC_CTRL.IM = 1 set above
		 */
		val &= ~DC_CTRL_RGN_OP_MSK;
		if (op & OP_INV)
			val |= DC_CTRL_RGN_OP_INV;
	}
	write_aux_reg(ctl, val);
}

#endif

static inline void __after_dc_op(const int op)
{
	if (op & OP_FLUSH) {
		const unsigned int ctl = ARC_REG_DC_CTRL;
		unsigned int reg;

		/* flush / flush-n-inv both wait */
		while ((reg = read_aux_reg(ctl)) & DC_CTRL_FLUSH_STATUS)
			;

		/* Switch back to default Invalidate mode */
		if (op == OP_FLUSH_N_INV)
			write_aux_reg(ctl, reg & ~DC_CTRL_INV_MODE_FLUSH);
	}
}

/*
 * Operation on Entire D-Cache
 * @op = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV}
 * Note that constant propagation ensures all the checks are gone
 * in generated code
 */
static inline void __dc_entire_op(const int op)
{
	int aux;

	__before_dc_op(op);

	if (op & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		aux = ARC_REG_DC_IVDC;
	else
		aux = ARC_REG_DC_FLSH;

	write_aux_reg(aux, 0x1);

	__after_dc_op(op);
}
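
/*
 * Since @op is always a compile-time constant at the call sites, the branches
 * above fold away. A sketch of possible invocations (the first mirrors real
 * calls in __dc_disable() and flush_cache_all() below):
 */
#if 0	/* example only, not built */
static void example_dc_entire_op(void)
{
	__dc_entire_op(OP_FLUSH_N_INV);	/* wback + discard all lines */
	__dc_entire_op(OP_FLUSH);	/* wback only; lines stay valid */
}
#endif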

static inline void __dc_disable(void)
{
	const int r = ARC_REG_DC_CTRL;

	__dc_entire_op(OP_FLUSH_N_INV);
	write_aux_reg(r, read_aux_reg(r) | DC_CTRL_DIS);
}

static void __dc_enable(void)
{
	const int r = ARC_REG_DC_CTRL;

	write_aux_reg(r, read_aux_reg(r) & ~DC_CTRL_DIS);
}

/* For kernel mappings cache operation: index is same as paddr */
#define __dc_line_op_k(p, sz, op)	__dc_line_op(p, p, sz, op)

/*
 * D-Cache Line ops: Per Line INV (discard or wback+discard) or FLUSH (wback)
 */
static inline void __dc_line_op(phys_addr_t paddr, unsigned long vaddr,
				unsigned long sz, const int op)
{
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;
	unsigned long flags;

	local_irq_save(flags);

	__before_dc_op(op);

	__cache_line_loop(paddr, vaddr, sz, op, full_page);

	__after_dc_op(op);

	local_irq_restore(flags);
}

#else

#define __dc_entire_op(op)
#define __dc_disable()
#define __dc_enable()
#define __dc_line_op(paddr, vaddr, sz, op)
#define __dc_line_op_k(paddr, sz, op)

#endif /* CONFIG_ARC_HAS_DCACHE */

#ifdef CONFIG_ARC_HAS_ICACHE

static inline void __ic_entire_inv(void)
{
	write_aux_reg(ARC_REG_IC_IVIC, 1);
	read_aux_reg(ARC_REG_IC_CTRL);	/* blocks */
}

static inline void
__ic_line_inv_vaddr_local(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz)
{
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;
	unsigned long flags;

	local_irq_save(flags);
	(*_cache_line_loop_ic_fn)(paddr, vaddr, sz, OP_INV_IC, full_page);
	local_irq_restore(flags);
}

#ifndef CONFIG_SMP

#define __ic_line_inv_vaddr(p, v, s)	__ic_line_inv_vaddr_local(p, v, s)

#else

struct ic_inv_args {
	phys_addr_t paddr, vaddr;
	int sz;
};

static void __ic_line_inv_vaddr_helper(void *info)
{
	struct ic_inv_args *ic_inv = info;

	__ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
}

static void __ic_line_inv_vaddr(phys_addr_t paddr, unsigned long vaddr,
				unsigned long sz)
{
	struct ic_inv_args ic_inv = {
		.paddr = paddr,
		.vaddr = vaddr,
		.sz    = sz
	};

	on_each_cpu(__ic_line_inv_vaddr_helper, &ic_inv, 1);
}

#endif	/* CONFIG_SMP */

#else	/* !CONFIG_ARC_HAS_ICACHE */

#define __ic_entire_inv()
#define __ic_line_inv_vaddr(pstart, vstart, sz)

#endif	/* CONFIG_ARC_HAS_ICACHE */

noinline void slc_op_rgn(phys_addr_t paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2
	/*
	 * SLC is shared between all cores and concurrent aux operations from
	 * multiple cores need to be serialized using a spinlock
	 * A concurrent operation can be silently ignored and/or the old/new
	 * operation can remain incomplete forever (lockup in SLC_CTRL_BUSY loop
	 * below)
	 */
	static DEFINE_SPINLOCK(lock);
	unsigned long flags;
	unsigned int ctrl;
	phys_addr_t end;

	spin_lock_irqsave(&lock, flags);

	/*
	 * The Region Flush operation is specified by CTRL.RGN_OP[11..9]
	 *  - b'000 (default) is Flush,
	 *  - b'001 is Invalidate if CTRL.IM == 0
	 *  - b'001 is Flush-n-Invalidate if CTRL.IM == 1
	 */
	ctrl = read_aux_reg(ARC_REG_SLC_CTRL);

	/* Don't rely on default value of IM bit */
	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	if (op & OP_INV)
		ctrl |= SLC_CTRL_RGN_OP_INV;	/* Inv or flush-n-inv */
	else
		ctrl &= ~SLC_CTRL_RGN_OP_INV;

	write_aux_reg(ARC_REG_SLC_CTRL, ctrl);

	/*
	 * Lower bits are ignored, no need to clip
	 * END needs to be setup before START (latter triggers the operation)
	 * END can't be same as START, so add (l2_line_sz - 1) to sz
	 */
	end = paddr + sz + l2_line_sz - 1;
	if (is_pae40_enabled())
		write_aux_reg(ARC_REG_SLC_RGN_END1, upper_32_bits(end));

	write_aux_reg(ARC_REG_SLC_RGN_END, lower_32_bits(end));

	if (is_pae40_enabled())
		write_aux_reg(ARC_REG_SLC_RGN_START1, upper_32_bits(paddr));

	write_aux_reg(ARC_REG_SLC_RGN_START, lower_32_bits(paddr));

	/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
	read_aux_reg(ARC_REG_SLC_CTRL);

	while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);

	spin_unlock_irqrestore(&lock, flags);
#endif
}
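
/*
 * Worked example of the END computation above, assuming 128B SLC lines and
 * PAE40 (addresses above 4GB): paddr = 0x1_0000_0000, sz = 0x100 gives
 * end = 0x1_0000_017F, split as RGN_END1 = 0x1, RGN_END = 0x0000017F;
 * the (l2_line_sz - 1) pad guarantees END != START even for sz = 0.
 */
#if 0	/* example only, not built */
static void example_slc_rgn_bounds(void)
{
	phys_addr_t paddr = 0x100000000ULL;
	phys_addr_t end = paddr + 0x100 + 128 - 1;	/* 0x1_0000_017F */

	(void)upper_32_bits(end);	/* 0x1        -> ARC_REG_SLC_RGN_END1 */
	(void)lower_32_bits(end);	/* 0x0000017F -> ARC_REG_SLC_RGN_END  */
}
#endif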

noinline void slc_op_line(phys_addr_t paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2
	/*
	 * SLC is shared between all cores and concurrent aux operations from
	 * multiple cores need to be serialized using a spinlock
	 * A concurrent operation can be silently ignored and/or the old/new
	 * operation can remain incomplete forever (lockup in SLC_CTRL_BUSY loop
	 * below)
	 */
	static DEFINE_SPINLOCK(lock);

	const unsigned long SLC_LINE_MASK = ~(l2_line_sz - 1);
	unsigned int ctrl, cmd;
	unsigned long flags;
	int num_lines;

	spin_lock_irqsave(&lock, flags);

	ctrl = read_aux_reg(ARC_REG_SLC_CTRL);

	/* Don't rely on default value of IM bit */
	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	write_aux_reg(ARC_REG_SLC_CTRL, ctrl);

	cmd = op & OP_INV ? ARC_AUX_SLC_IVDL : ARC_AUX_SLC_FLDL;

	sz += paddr & ~SLC_LINE_MASK;
	paddr &= SLC_LINE_MASK;

	num_lines = DIV_ROUND_UP(sz, l2_line_sz);

	while (num_lines-- > 0) {
		write_aux_reg(cmd, paddr);
		paddr += l2_line_sz;
	}

	/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
	read_aux_reg(ARC_REG_SLC_CTRL);

	while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);

	spin_unlock_irqrestore(&lock, flags);
#endif
}

#define slc_op(paddr, sz, op)	slc_op_rgn(paddr, sz, op)

noinline static void slc_entire_op(const int op)
{
	unsigned int ctrl, r = ARC_REG_SLC_CTRL;

	ctrl = read_aux_reg(r);

	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	write_aux_reg(r, ctrl);

	if (op & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		write_aux_reg(ARC_REG_SLC_INVALIDATE, 0x1);
	else
		write_aux_reg(ARC_REG_SLC_FLUSH, 0x1);

	/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
	read_aux_reg(r);

	/* Important to wait for flush to complete */
	while (read_aux_reg(r) & SLC_CTRL_BUSY);
}

static inline void arc_slc_disable(void)
{
	const int r = ARC_REG_SLC_CTRL;

	slc_entire_op(OP_FLUSH_N_INV);
	write_aux_reg(r, read_aux_reg(r) | SLC_CTRL_DIS);
}

static inline void arc_slc_enable(void)
{
	const int r = ARC_REG_SLC_CTRL;

	write_aux_reg(r, read_aux_reg(r) & ~SLC_CTRL_DIS);
}

/***********************************************************
 * Exported APIs
 */

/*
 * Handle cache congruency of kernel and userspace mappings of page when kernel
 * writes-to/reads-from
 *
 * The idea is to defer flushing of kernel mapping after a WRITE, possible if:
 *  -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
 *  -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
 *  -In SMP, if hardware caches are coherent
 *
 * There's a corollary case, where kernel READs from a userspace mapped page.
 * If the U-mapping is not congruent to the K-mapping, the former needs flushing.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	if (!cache_is_vipt_aliasing()) {
		clear_bit(PG_dc_clean, &page->flags);
		return;
	}

	/* don't handle anon pages here */
	mapping = page_mapping_file(page);
	if (!mapping)
		return;

	/*
	 * pagecache page, file not yet mapped to userspace
	 * Make a note that K-mapping is dirty
	 */
	if (!mapping_mapped(mapping)) {
		clear_bit(PG_dc_clean, &page->flags);
	} else if (page_mapcount(page)) {

		/* kernel reading from page with U-mapping */
		phys_addr_t paddr = (unsigned long)page_address(page);
		unsigned long vaddr = page->index << PAGE_SHIFT;

		if (addr_not_cache_congruent(paddr, vaddr))
			__flush_dcache_page(paddr, vaddr);
	}
}
EXPORT_SYMBOL(flush_dcache_page);

/*
 * DMA ops for systems with L1 cache only
 * Make memory coherent with L1 cache by flushing/invalidating L1 lines
 */
static void __dma_cache_wback_inv_l1(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
}

static void __dma_cache_inv_l1(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
}

static void __dma_cache_wback_l1(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
}

/*
 * DMA ops for systems with both L1 and L2 caches, but without IOC
 * Both L1 and L2 lines need to be explicitly flushed/invalidated
 */
static void __dma_cache_wback_inv_slc(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
	slc_op(start, sz, OP_FLUSH_N_INV);
}

static void __dma_cache_inv_slc(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
	slc_op(start, sz, OP_INV);
}

static void __dma_cache_wback_slc(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
	slc_op(start, sz, OP_FLUSH);
}

/*
 * Exported DMA API
 */
void dma_cache_wback_inv(phys_addr_t start, unsigned long sz)
{
	__dma_cache_wback_inv(start, sz);
}
EXPORT_SYMBOL(dma_cache_wback_inv);

void dma_cache_inv(phys_addr_t start, unsigned long sz)
{
	__dma_cache_inv(start, sz);
}
EXPORT_SYMBOL(dma_cache_inv);

void dma_cache_wback(phys_addr_t start, unsigned long sz)
{
	__dma_cache_wback(start, sz);
}
EXPORT_SYMBOL(dma_cache_wback);
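
/*
 * A sketch of how a (hypothetical) non-coherent driver would use the API
 * above: write back before the device reads a buffer, invalidate before the
 * CPU reads what the device wrote. Real drivers normally get here indirectly,
 * via the generic DMA mapping layer. Address and size are made up.
 */
#if 0	/* example only, not built */
static void example_dma_streaming(void)
{
	phys_addr_t buf = 0x80100000;		/* assumed DMA buffer */

	dma_cache_wback(buf, PAGE_SIZE);	/* CPU wrote, device will read */
	/* ... device DMA runs ... */
	dma_cache_inv(buf, PAGE_SIZE);		/* device wrote, CPU will read */
}
#endif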

/*
 * This is the API for making I/D Caches consistent when modifying
 * kernel code (loadable modules, kprobes, kgdb...)
 * This is called on insmod, with kernel virtual address for CODE of
 * the module. ARC cache maintenance ops require PHY address, thus we
 * need to convert vmalloc addr to PHY addr
 */
void flush_icache_range(unsigned long kstart, unsigned long kend)
{
	unsigned int tot_sz;

	WARN(kstart < TASK_SIZE, "%s() can't handle user vaddr", __func__);

	/* Shortcut for bigger flush ranges.
	 * Here we don't care if this was kernel virtual or phy addr
	 */
	tot_sz = kend - kstart;
	if (tot_sz > PAGE_SIZE) {
		flush_cache_all();
		return;
	}

	/* Case: Kernel Phy addr (0x8000_0000 onwards) */
	if (likely(kstart > PAGE_OFFSET)) {
		/*
		 * The 2nd arg despite being paddr will be used to index icache
		 * This is OK since no alternate virtual mappings will exist
		 * given the callers for this case: kprobe/kgdb in built-in
		 * kernel code only.
		 */
		__sync_icache_dcache(kstart, kstart, kend - kstart);
		return;
	}

	/*
	 * Case: Kernel Vaddr (0x7000_0000 to 0x7fff_ffff)
	 * (1) ARC Cache Maintenance ops only take Phy addr, hence special
	 *     handling of kernel vaddr.
	 *
	 * (2) Despite @tot_sz being < PAGE_SIZE (bigger cases handled already),
	 *     it still needs to handle a 2 page scenario, where the range
	 *     straddles across 2 virtual pages and hence need for loop
	 */
	while (tot_sz > 0) {
		unsigned int off, sz;
		unsigned long phy, pfn;

		off = kstart % PAGE_SIZE;
		pfn = vmalloc_to_pfn((void *)kstart);
		phy = (pfn << PAGE_SHIFT) + off;
		sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
		__sync_icache_dcache(phy, kstart, sz);
		kstart += sz;
		tot_sz -= sz;
	}
}
EXPORT_SYMBOL(flush_icache_range);
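
/*
 * Worked example of the 2-page straddle handled by the loop above, assuming
 * 8K pages: patching 0x40 bytes at vmalloc addr 0x7000_1FF0 spans two pages,
 * so the loop runs twice - 0x10 bytes against the first page's pfn, then
 * 0x30 against the second's - each with its own vmalloc_to_pfn() lookup.
 */
#if 0	/* example only, not built */
static void example_icache_straddle(void)
{
	/* iter 1: off = 0x1ff0, sz = 0x10; iter 2: off = 0, sz = 0x30 */
	flush_icache_range(0x70001ff0UL, 0x70001ff0UL + 0x40);
}
#endif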

/*
 * General purpose helper to make I and D cache lines consistent.
 * @paddr is phy addr of region
 * @vaddr is typically user vaddr (breakpoint) or kernel vaddr (vmalloc)
 *    However in one instance, when called by kprobe (for a breakpt in
 *    builtin kernel code) @vaddr will be paddr only, meaning CDU operation will
 *    use a paddr to index the cache (despite VIPT). This is fine since a
 *    builtin kernel page will not have any virtual mappings.
 *    kprobe on loadable module will be kernel vaddr.
 */
void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len)
{
	__dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
	__ic_line_inv_vaddr(paddr, vaddr, len);
}

/* wrapper to compile time eliminate alignment checks in flush loop */
void __inv_icache_page(phys_addr_t paddr, unsigned long vaddr)
{
	__ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
}

/*
 * wrapper to clear out kernel or userspace mappings of a page
 * For kernel mappings @vaddr == @paddr
 */
void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr)
{
	__dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
}

noinline void flush_cache_all(void)
{
	unsigned long flags;

	local_irq_save(flags);

	__ic_entire_inv();
	__dc_entire_op(OP_FLUSH_N_INV);

	local_irq_restore(flags);
}

#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING

void flush_cache_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
		      unsigned long pfn)
{
	phys_addr_t paddr = pfn << PAGE_SHIFT;

	u_vaddr &= PAGE_MASK;

	__flush_dcache_page(paddr, u_vaddr);

	if (vma->vm_flags & VM_EXEC)
		__inv_icache_page(paddr, u_vaddr);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	flush_cache_all();
}

void flush_anon_page(struct vm_area_struct *vma, struct page *page,
		     unsigned long u_vaddr)
{
	/* TBD: do we really need to clear the kernel mapping */
	__flush_dcache_page((phys_addr_t)page_address(page), u_vaddr);
	__flush_dcache_page((phys_addr_t)page_address(page),
			    (phys_addr_t)page_address(page));
}

#endif

void copy_user_highpage(struct page *to, struct page *from,
			unsigned long u_vaddr, struct vm_area_struct *vma)
{
	void *kfrom = kmap_atomic(from);
	void *kto = kmap_atomic(to);
	int clean_src_k_mappings = 0;

	/*
	 * If SRC page was already mapped in userspace AND its U-mapping is
	 * not congruent with K-mapping, sync former to physical page so that
	 * K-mapping in memcpy below, sees the right data
	 *
	 * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
	 * equally valid for SRC page as well
	 *
	 * For !VIPT cache, all of this gets compiled out as
	 * addr_not_cache_congruent() is 0
	 */
	if (page_mapcount(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
		__flush_dcache_page((unsigned long)kfrom, u_vaddr);
		clean_src_k_mappings = 1;
	}

	copy_page(kto, kfrom);

	/*
	 * Mark DST page K-mapping as dirty for a later finalization by
	 * update_mmu_cache(). Although the finalization could have been done
	 * here as well (given that both vaddr/paddr are available).
	 * But update_mmu_cache() already has code to do that for other
	 * non copied user pages (e.g. read faults which wire in pagecache page
	 * directly).
	 */
	clear_bit(PG_dc_clean, &to->flags);

	/*
	 * if SRC was already usermapped and non-congruent to kernel mapping
	 * sync the kernel mapping back to physical page
	 */
	if (clean_src_k_mappings) {
		__flush_dcache_page((unsigned long)kfrom, (unsigned long)kfrom);
		set_bit(PG_dc_clean, &from->flags);
	} else {
		clear_bit(PG_dc_clean, &from->flags);
	}

	kunmap_atomic(kto);
	kunmap_atomic(kfrom);
}

void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
{
	clear_page(to);
	clear_bit(PG_dc_clean, &page->flags);
}

/**********************************************************************
 * Explicit Cache flush request from user space via syscall
 * Needed for JITs which generate code on the fly
 */
SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
{
	/* TBD: optimize this */
	flush_cache_all();
	return 0;
}
Vineet Gupta8c47f832016-06-22 16:01:19 +05301139/*
1140 * IO-Coherency (IOC) setup rules:
1141 *
1142 * 1. Needs to be at system level, so only once by Master core
1143 * Non-Masters need not be accessing caches at that time
1144 * - They are either HALT_ON_RESET and kick started much later or
1145 * - if run on reset, need to ensure that arc_platform_smp_wait_to_boot()
1146 * doesn't perturb caches or coherency unit
1147 *
1148 * 2. caches (L1 and SLC) need to be purged (flush+inv) before setting up IOC,
1149 * otherwise any straggler data might behave strangely post IOC enabling
1150 *
1151 * 3. All Caches need to be disabled when setting up IOC to elide any in-flight
1152 * Coherency transactions
1153 */
noinline void __init arc_ioc_setup(void)
{
	unsigned int ioc_base, mem_sz;

	/*
	 * If IOC was already enabled (due to bootloader) it technically needs to
	 * be reconfigured with aperture base/size corresponding to the Linux
	 * memory map, which will certainly be different from u-boot's. But
	 * disabling and reenabling IOC when DMA might be potentially active is
	 * tricky business. To avoid random memory issues later, just panic here
	 * and ask the user to upgrade to a bootloader which doesn't enable IOC
	 */
	if (read_aux_reg(ARC_REG_IO_COH_ENABLE) & ARC_IO_COH_ENABLE_BIT)
		panic("IOC already enabled, please upgrade bootloader!\n");

	if (!ioc_enable)
		return;

	/* Flush + invalidate + disable L1 dcache */
	__dc_disable();

	/* Flush + invalidate SLC */
	if (read_aux_reg(ARC_REG_SLC_BCR))
		slc_entire_op(OP_FLUSH_N_INV);

	/*
	 * currently IOC Aperture covers entire DDR
	 * TBD: fix for PGU + 1GB of low mem
	 * TBD: fix for PAE
	 */
	mem_sz = arc_get_mem_sz();

	if (!is_power_of_2(mem_sz) || mem_sz < 4096)
		panic("IOC Aperture size must be power of 2 larger than 4KB");

	/*
	 * IOC Aperture size decoded as 2 ^ (SIZE + 2) KB,
	 * so setting 0x11 implies 512MB, 0x12 implies 1GB...
	 */
	write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, order_base_2(mem_sz >> 10) - 2);

	/* for now assume kernel base is start of IOC aperture */
	ioc_base = CONFIG_LINUX_RAM_BASE;

	if (ioc_base % mem_sz != 0)
		panic("IOC Aperture start must be aligned to the size of the aperture");

	write_aux_reg(ARC_REG_IO_COH_AP0_BASE, ioc_base >> 12);
	write_aux_reg(ARC_REG_IO_COH_PARTIAL, ARC_IO_COH_PARTIAL_BIT);
	write_aux_reg(ARC_REG_IO_COH_ENABLE, ARC_IO_COH_ENABLE_BIT);

	/* Re-enable L1 dcache */
	__dc_enable();
}
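
/*
 * Worked example of the aperture encoding above: for mem_sz = 512MB,
 * order_base_2(512M >> 10) - 2 = 19 - 2 = 0x11, matching the
 * "0x11 implies 512MB" note; the base register takes the aperture start in
 * 4KB units (hence the >> 12). The base value used is an assumption.
 */
#if 0	/* example only, not built */
static void example_ioc_aperture(void)
{
	unsigned int enc  = order_base_2((512U << 20) >> 10) - 2;	/* 0x11 */
	unsigned int base = 0x80000000U >> 12;		/* assumed RAM base */

	(void)enc;
	(void)base;
}
#endif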

/*
 * Cache related boot time checks/setups only needed on master CPU:
 *  - Geometry checks (kernel build and hardware agree: e.g. L1_CACHE_BYTES)
 *    Assume SMP only, so all cores will have same cache config. A check on
 *    one core suffices for all
 *  - IOC setup / dma callbacks only need to be done once
 */
void __init arc_cache_init_master(void)
{
	unsigned int __maybe_unused cpu = smp_processor_id();

	if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
		struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;

		if (!ic->line_len)
			panic("cache support enabled but non-existent cache\n");

		if (ic->line_len != L1_CACHE_BYTES)
			panic("ICache line [%d] != kernel Config [%d]",
			      ic->line_len, L1_CACHE_BYTES);

		/*
		 * In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
		 * pair to provide vaddr/paddr respectively, just as in MMU v3
		 */
		if (is_isa_arcv2() && ic->alias)
			_cache_line_loop_ic_fn = __cache_line_loop_v3;
		else
			_cache_line_loop_ic_fn = __cache_line_loop;
	}

	if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
		struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;

		if (!dc->line_len)
			panic("cache support enabled but non-existent cache\n");

		if (dc->line_len != L1_CACHE_BYTES)
			panic("DCache line [%d] != kernel Config [%d]",
			      dc->line_len, L1_CACHE_BYTES);

		/* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */
		if (is_isa_arcompact()) {
			int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
			int num_colors = dc->sz_k/dc->assoc/TO_KB(PAGE_SIZE);

			if (dc->alias) {
				if (!handled)
					panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
				if (CACHE_COLORS_NUM != num_colors)
					panic("CACHE_COLORS_NUM not optimized for config\n");
			} else if (!dc->alias && handled) {
				panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
			}
		}
	}

	/*
	 * Check that SMP_CACHE_BYTES (and hence ARCH_DMA_MINALIGN) is larger
	 * than or equal to any cache line length.
	 */
	BUILD_BUG_ON_MSG(L1_CACHE_BYTES > SMP_CACHE_BYTES,
			 "SMP_CACHE_BYTES must be >= any cache line length");
	if (is_isa_arcv2() && (l2_line_sz > SMP_CACHE_BYTES))
		panic("L2 Cache line [%d] > kernel Config [%d]\n",
		      l2_line_sz, SMP_CACHE_BYTES);

	/* Note that SLC disable not formally supported till HS 3.0 */
	if (is_isa_arcv2() && l2_line_sz && !slc_enable)
		arc_slc_disable();

	if (is_isa_arcv2() && ioc_exists)
		arc_ioc_setup();

	if (is_isa_arcv2() && l2_line_sz && slc_enable) {
		__dma_cache_wback_inv = __dma_cache_wback_inv_slc;
		__dma_cache_inv = __dma_cache_inv_slc;
		__dma_cache_wback = __dma_cache_wback_slc;
	} else {
		__dma_cache_wback_inv = __dma_cache_wback_inv_l1;
		__dma_cache_inv = __dma_cache_inv_l1;
		__dma_cache_wback = __dma_cache_wback_l1;
	}

	/*
	 * In case of IOC (say IOC+SLC case), pointers above could still be set
	 * but end up not being relevant as the first function in chain is not
	 * called at all for devices using coherent DMA.
	 *     arch_sync_dma_for_cpu() -> dma_cache_*() -> __dma_cache_*()
	 */
}

void __ref arc_cache_init(void)
{
	unsigned int __maybe_unused cpu = smp_processor_id();
	char str[256];

	pr_info("%s", arc_cache_mumbojumbo(0, str, sizeof(str)));

	if (!cpu)
		arc_cache_init_master();

	/*
	 * In PAE regime, TLB and cache maintenance ops take wider addresses
	 * And even if PAE is not enabled in kernel, the upper 32-bits still need
	 * to be zeroed to keep the ops sane.
	 * As an optimization for the more common !PAE enabled case, zero them out
	 * once at init, rather than checking/setting to 0 for every runtime op
	 */
	if (is_isa_arcv2() && pae40_exist_but_not_enab()) {

		if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE))
			write_aux_reg(ARC_REG_IC_PTAG_HI, 0);

		if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE))
			write_aux_reg(ARC_REG_DC_PTAG_HI, 0);

		if (l2_line_sz) {
			write_aux_reg(ARC_REG_SLC_RGN_END1, 0);
			write_aux_reg(ARC_REG_SLC_RGN_START1, 0);
		}
	}
}