/*
 * NAND Flash Controller Device Driver
 * Copyright © 2009-2010, Intel Corporation and its suppliers.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/mtd/mtd.h>
#include <linux/module.h>

#include "denali.h"

MODULE_LICENSE("GPL");

#define DENALI_NAND_NAME    "denali-nand"

/*
 * indicates whether or not the internal value for the flash bank is
 * valid or not
 */
#define CHIP_SELECT_INVALID	-1

#define DENALI_NR_BANKS		4

/*
 * The bus interface clock, clk_x, is phase aligned with the core clock.  The
 * clk_x is an integral multiple N of the core clk.  The value N is configured
 * at IP delivery time, and its available value is 4, 5, or 6.  We need to align
 * to the largest value to make it work with any possible configuration.
 */
#define DENALI_CLK_X_MULT	6
/*
 * this helper allows us to convert from an MTD structure to our own
 * device context (denali) structure.
 */
static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd)
{
	return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand);
}

/*
 * this is a helper macro that allows us to
 * format the bank into the proper bits for the controller
 */
#define BANK(x) ((x) << 24)

/*
 * Certain operations for the denali NAND controller use an indexed mode to
 * read/write data. The operation is performed by writing the address value
 * of the command to the device memory followed by the data. This function
 * abstracts this common operation.
 */
static void index_addr(struct denali_nand_info *denali,
		       uint32_t address, uint32_t data)
{
	iowrite32(address, denali->flash_mem);
	iowrite32(data, denali->flash_mem + 0x10);
}

/*
 * Use the configuration feature register to determine the maximum number of
 * banks that the hardware supports.
 */
static void detect_max_banks(struct denali_nand_info *denali)
{
	uint32_t features = ioread32(denali->flash_reg + FEATURES);

	denali->max_banks = 1 << (features & FEATURES__N_BANKS);

	/* the encoding changed from rev 5.0 to 5.1 */
	if (denali->revision < 0x0501)
		denali->max_banks <<= 1;
}

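/*
 * Unmask all interrupt sources on every bank and turn on the global
 * interrupt enable so the ISR can see per-bank events.
 */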
static void denali_enable_irq(struct denali_nand_info *denali)
{
	int i;

	for (i = 0; i < DENALI_NR_BANKS; i++)
		iowrite32(U32_MAX, denali->flash_reg + INTR_EN(i));
	iowrite32(GLOBAL_INT_EN_FLAG, denali->flash_reg + GLOBAL_INT_ENABLE);
}

static void denali_disable_irq(struct denali_nand_info *denali)
{
	int i;

	for (i = 0; i < DENALI_NR_BANKS; i++)
		iowrite32(0, denali->flash_reg + INTR_EN(i));
	iowrite32(0, denali->flash_reg + GLOBAL_INT_ENABLE);
}

static void denali_clear_irq(struct denali_nand_info *denali,
			     int bank, uint32_t irq_status)
{
	/* write one to clear bits */
	iowrite32(irq_status, denali->flash_reg + INTR_STATUS(bank));
}

static void denali_clear_irq_all(struct denali_nand_info *denali)
{
	int i;

	for (i = 0; i < DENALI_NR_BANKS; i++)
		denali_clear_irq(denali, i, U32_MAX);
}

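/*
 * Interrupt handler: read and clear (write-one-to-clear) the status of every
 * bank, accumulate the bits for the currently selected bank in
 * denali->irq_status, and wake up a waiter when one of the bits it is
 * waiting for (denali->irq_mask) shows up.
 */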
static irqreturn_t denali_isr(int irq, void *dev_id)
{
	struct denali_nand_info *denali = dev_id;
	irqreturn_t ret = IRQ_NONE;
	uint32_t irq_status;
	int i;

	spin_lock(&denali->irq_lock);

	for (i = 0; i < DENALI_NR_BANKS; i++) {
		irq_status = ioread32(denali->flash_reg + INTR_STATUS(i));
		if (irq_status)
			ret = IRQ_HANDLED;

		denali_clear_irq(denali, i, irq_status);

		if (i != denali->flash_bank)
			continue;

		denali->irq_status |= irq_status;

		if (denali->irq_status & denali->irq_mask)
			complete(&denali->complete);
	}

	spin_unlock(&denali->irq_lock);

	return ret;
}

static void denali_reset_irq(struct denali_nand_info *denali)
{
	unsigned long flags;

	spin_lock_irqsave(&denali->irq_lock, flags);
	denali->irq_status = 0;
	denali->irq_mask = 0;
	spin_unlock_irqrestore(&denali->irq_lock, flags);
}

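/*
 * Wait up to one second for any interrupt bit in irq_mask to be latched by
 * the ISR.  Returns the accumulated interrupt status, or 0 on timeout.
 */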
static uint32_t denali_wait_for_irq(struct denali_nand_info *denali,
				    uint32_t irq_mask)
{
	unsigned long time_left, flags;
	uint32_t irq_status;

	spin_lock_irqsave(&denali->irq_lock, flags);

	irq_status = denali->irq_status;

	if (irq_mask & irq_status) {
		/* return immediately if the IRQ has already happened. */
		spin_unlock_irqrestore(&denali->irq_lock, flags);
		return irq_status;
	}

	denali->irq_mask = irq_mask;
	reinit_completion(&denali->complete);
	spin_unlock_irqrestore(&denali->irq_lock, flags);

	time_left = wait_for_completion_timeout(&denali->complete,
						msecs_to_jiffies(1000));
	if (!time_left) {
		dev_err(denali->dev, "timeout while waiting for irq 0x%x\n",
			denali->irq_mask);
		return 0;
	}

	return denali->irq_status;
}

static uint32_t denali_check_irq(struct denali_nand_info *denali)
{
	unsigned long flags;
	uint32_t irq_status;

	spin_lock_irqsave(&denali->irq_lock, flags);
	irq_status = denali->irq_status;
	spin_unlock_irqrestore(&denali->irq_lock, flags);

	return irq_status;
}

/*
 * This helper function sets up the registers for ECC and whether or not
 * the spare area will be transferred.
 */
static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en,
			       bool transfer_spare)
{
	int ecc_en_flag, transfer_spare_flag;

	/* set ECC, transfer spare bits if needed */
	ecc_en_flag = ecc_en ? ECC_ENABLE__FLAG : 0;
	transfer_spare_flag = transfer_spare ? TRANSFER_SPARE_REG__FLAG : 0;

	/* Enable spare area/ECC per user's request. */
	iowrite32(ecc_en_flag, denali->flash_reg + ECC_ENABLE);
	iowrite32(transfer_spare_flag, denali->flash_reg + TRANSFER_SPARE_REG);
}

static void denali_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	int i;

	iowrite32(MODE_11 | BANK(denali->flash_bank) | 2, denali->flash_mem);

	for (i = 0; i < len; i++)
		buf[i] = ioread32(denali->flash_mem + 0x10);
}

static void denali_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	int i;

	iowrite32(MODE_11 | BANK(denali->flash_bank) | 2, denali->flash_mem);

	for (i = 0; i < len; i++)
		iowrite32(buf[i], denali->flash_mem + 0x10);
}

static void denali_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint16_t *buf16 = (uint16_t *)buf;
	int i;

	iowrite32(MODE_11 | BANK(denali->flash_bank) | 2, denali->flash_mem);

	for (i = 0; i < len / 2; i++)
		buf16[i] = ioread32(denali->flash_mem + 0x10);
}

static void denali_write_buf16(struct mtd_info *mtd, const uint8_t *buf,
			       int len)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	const uint16_t *buf16 = (const uint16_t *)buf;
	int i;

	iowrite32(MODE_11 | BANK(denali->flash_bank) | 2, denali->flash_mem);

	for (i = 0; i < len / 2; i++)
		iowrite32(buf16[i], denali->flash_mem + 0x10);
}

static uint8_t denali_read_byte(struct mtd_info *mtd)
{
	uint8_t byte;

	denali_read_buf(mtd, &byte, 1);

	return byte;
}

static void denali_write_byte(struct mtd_info *mtd, uint8_t byte)
{
	denali_write_buf(mtd, &byte, 1);
}

static uint16_t denali_read_word(struct mtd_info *mtd)
{
	uint16_t word;

	denali_read_buf16(mtd, (uint8_t *)&word, 2);

	return word;
}

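/*
 * Raw command/address interface: command-latch (CLE) bytes are issued as
 * MODE_11 command cycles (type 0) and address-latch (ALE) bytes as address
 * cycles (type 1) through the indexed access port.
 */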
static void denali_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t type;

	if (ctrl & NAND_CLE)
		type = 0;
	else if (ctrl & NAND_ALE)
		type = 1;
	else
		return;

	/*
	 * Some commands are followed by chip->dev_ready or chip->waitfunc.
	 * irq_status must be cleared here to catch the R/B# interrupt later.
	 */
	if (ctrl & NAND_CTRL_CHANGE)
		denali_reset_irq(denali);

	index_addr(denali, MODE_11 | BANK(denali->flash_bank) | type, dat);
}

static int denali_dev_ready(struct mtd_info *mtd)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	return !!(denali_check_irq(denali) & INTR__INT_ACT);
}

static int denali_check_erased_page(struct mtd_info *mtd,
				    struct nand_chip *chip, uint8_t *buf,
				    unsigned long uncor_ecc_flags,
				    unsigned int max_bitflips)
{
	uint8_t *ecc_code = chip->buffers->ecccode;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	int i, ret, stat;

	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	for (i = 0; i < ecc_steps; i++) {
		if (!(uncor_ecc_flags & BIT(i)))
			continue;

		stat = nand_check_erased_ecc_chunk(buf, ecc_size,
						   ecc_code, ecc_bytes,
						   NULL, 0,
						   chip->ecc.strength);
		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}

		buf += ecc_size;
		ecc_code += ecc_bytes;
	}

	return max_bitflips;
}

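/*
 * Read the per-bank ECC_COR_INFO register filled in by the hardware ECC
 * fixup engine.  On an uncorrectable error every sector is flagged for the
 * erased-page check; otherwise the maximum per-sector bitflip count is
 * returned.
 */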
static int denali_hw_ecc_fixup(struct mtd_info *mtd,
			       struct denali_nand_info *denali,
			       unsigned long *uncor_ecc_flags)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int bank = denali->flash_bank;
	uint32_t ecc_cor;
	unsigned int max_bitflips;

	ecc_cor = ioread32(denali->flash_reg + ECC_COR_INFO(bank));
	ecc_cor >>= ECC_COR_INFO__SHIFT(bank);

	if (ecc_cor & ECC_COR_INFO__UNCOR_ERR) {
		/*
		 * This flag is set when uncorrectable error occurs at least in
		 * one ECC sector.  We can not know "how many sectors", or
		 * "which sector(s)".  We need erase-page check for all sectors.
		 */
		*uncor_ecc_flags = GENMASK(chip->ecc.steps - 1, 0);
		return 0;
	}

	max_bitflips = ecc_cor & ECC_COR_INFO__MAX_ERRORS;

	/*
	 * The register holds the maximum of per-sector corrected bitflips.
	 * This is suitable for the return value of the ->read_page() callback.
	 * Unfortunately, we can not know the total number of corrected bits in
	 * the page.  Increase the stats by max_bitflips. (compromised solution)
	 */
	mtd->ecc_stats.corrected += max_bitflips;

	return max_bitflips;
}

#define ECC_SECTOR(x)	(((x) & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12)
#define ECC_BYTE(x)	(((x) & ECC_ERROR_ADDRESS__OFFSET))
#define ECC_CORRECTION_VALUE(x) ((x) & ERR_CORRECTION_INFO__BYTEMASK)
#define ECC_ERROR_UNCORRECTABLE(x) ((x) & ERR_CORRECTION_INFO__ERROR_TYPE)
#define ECC_ERR_DEVICE(x)	(((x) & ERR_CORRECTION_INFO__DEVICE_NR) >> 8)
#define ECC_LAST_ERR(x)		((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO)

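/*
 * Walk the controller's error FIFO (ECC_ERROR_ADDRESS / ERR_CORRECTION_INFO)
 * and apply each single-byte correction to the buffer.  Sectors reported as
 * uncorrectable are only flagged here; they get the erased-page check later.
 */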
static int denali_sw_ecc_fixup(struct mtd_info *mtd,
			       struct denali_nand_info *denali,
			       unsigned long *uncor_ecc_flags, uint8_t *buf)
{
	unsigned int ecc_size = denali->nand.ecc.size;
	unsigned int bitflips = 0;
	unsigned int max_bitflips = 0;
	uint32_t err_addr, err_cor_info;
	unsigned int err_byte, err_sector, err_device;
	uint8_t err_cor_value;
	unsigned int prev_sector = 0;
	uint32_t irq_status;

	denali_reset_irq(denali);

	do {
		err_addr = ioread32(denali->flash_reg + ECC_ERROR_ADDRESS);
		err_sector = ECC_SECTOR(err_addr);
		err_byte = ECC_BYTE(err_addr);

		err_cor_info = ioread32(denali->flash_reg + ERR_CORRECTION_INFO);
		err_cor_value = ECC_CORRECTION_VALUE(err_cor_info);
		err_device = ECC_ERR_DEVICE(err_cor_info);

		/* reset the bitflip counter when crossing ECC sector */
		if (err_sector != prev_sector)
			bitflips = 0;

		if (ECC_ERROR_UNCORRECTABLE(err_cor_info)) {
			/*
			 * Check later if this is a real ECC error, or
			 * an erased sector.
			 */
			*uncor_ecc_flags |= BIT(err_sector);
		} else if (err_byte < ecc_size) {
			/*
			 * If err_byte is larger than ecc_size, the error
			 * happened in the OOB area, so there is nothing to
			 * correct in the payload and we ignore it.
			 * err_device identifies which NAND device the error
			 * bits belong to when more than one NAND is connected.
			 */
			int offset;
			unsigned int flips_in_byte;

			offset = (err_sector * ecc_size + err_byte) *
					denali->devnum + err_device;

			/* correct the ECC error */
			flips_in_byte = hweight8(buf[offset] ^ err_cor_value);
			buf[offset] ^= err_cor_value;
			mtd->ecc_stats.corrected += flips_in_byte;
			bitflips += flips_in_byte;

			max_bitflips = max(max_bitflips, bitflips);
		}

		prev_sector = err_sector;
	} while (!ECC_LAST_ERR(err_cor_info));

	/*
	 * Once all ECC errors have been handled, the controller triggers an
	 * ECC_TRANSACTION_DONE interrupt, so wait for it here.
	 */
	irq_status = denali_wait_for_irq(denali, INTR__ECC_TRANSACTION_DONE);
	if (!(irq_status & INTR__ECC_TRANSACTION_DONE))
		return -EIO;

	return max_bitflips;
}

/* programs the controller to either enable/disable DMA transfers */
static void denali_enable_dma(struct denali_nand_info *denali, bool en)
{
	iowrite32(en ? DMA_ENABLE__FLAG : 0, denali->flash_reg + DMA_ENABLE);
	ioread32(denali->flash_reg + DMA_ENABLE);
}

static void denali_setup_dma64(struct denali_nand_info *denali,
			       dma_addr_t dma_addr, int page, int write)
{
	uint32_t mode;
	const int page_count = 1;

	mode = MODE_10 | BANK(denali->flash_bank) | page;

	/* DMA is a three step process */

	/*
	 * 1. setup transfer type, interrupt when complete,
	 *    burst len = 64 bytes, the number of pages
	 */
	index_addr(denali, mode,
		   0x01002000 | (64 << 16) | (write << 8) | page_count);

	/* 2. set memory low address */
	index_addr(denali, mode, dma_addr);

	/* 3. set memory high address */
	index_addr(denali, mode, (uint64_t)dma_addr >> 32);
}

static void denali_setup_dma32(struct denali_nand_info *denali,
			       dma_addr_t dma_addr, int page, int write)
{
	uint32_t mode;
	const int page_count = 1;

	mode = MODE_10 | BANK(denali->flash_bank);

	/* DMA is a four step process */

	/* 1. setup transfer type and # of pages */
	index_addr(denali, mode | page, 0x2000 | (write << 8) | page_count);

	/* 2. set memory high address bits 23:8 */
	index_addr(denali, mode | ((dma_addr >> 16) << 8), 0x2200);

	/* 3. set memory low address bits 23:8 */
	index_addr(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300);

	/* 4. interrupt when complete, burst len = 64 bytes */
	index_addr(denali, mode | 0x14000, 0x2400);
}

static void denali_setup_dma(struct denali_nand_info *denali,
			     dma_addr_t dma_addr, int page, int write)
{
	if (denali->caps & DENALI_CAP_DMA_64BIT)
		denali_setup_dma64(denali, dma_addr, page, write);
	else
		denali_setup_dma32(denali, dma_addr, page, write);
}

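/*
 * PIO read path, used when DMA is not available: map the page with a MODE_01
 * address and drain the data port one 32-bit word at a time, then check the
 * latched interrupt bits for completion and ECC errors.
 */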
static int denali_pio_read(struct denali_nand_info *denali, void *buf,
			   size_t size, int page, int raw)
{
	uint32_t addr = BANK(denali->flash_bank) | page;
	uint32_t *buf32 = (uint32_t *)buf;
	uint32_t irq_status, ecc_err_mask;
	int i;

	if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
		ecc_err_mask = INTR__ECC_UNCOR_ERR;
	else
		ecc_err_mask = INTR__ECC_ERR;

	denali_reset_irq(denali);

	iowrite32(MODE_01 | addr, denali->flash_mem);
	for (i = 0; i < size / 4; i++)
		*buf32++ = ioread32(denali->flash_mem + 0x10);

	irq_status = denali_wait_for_irq(denali, INTR__PAGE_XFER_INC);
	if (!(irq_status & INTR__PAGE_XFER_INC))
		return -EIO;

	if (irq_status & INTR__ERASED_PAGE)
		memset(buf, 0xff, size);

	return irq_status & ecc_err_mask ? -EBADMSG : 0;
}

static int denali_pio_write(struct denali_nand_info *denali,
			    const void *buf, size_t size, int page, int raw)
{
	uint32_t addr = BANK(denali->flash_bank) | page;
	const uint32_t *buf32 = (uint32_t *)buf;
	uint32_t irq_status;
	int i;

	denali_reset_irq(denali);

	iowrite32(MODE_01 | addr, denali->flash_mem);
	for (i = 0; i < size / 4; i++)
		iowrite32(*buf32++, denali->flash_mem + 0x10);

	irq_status = denali_wait_for_irq(denali,
				INTR__PROGRAM_COMP | INTR__PROGRAM_FAIL);
	if (!(irq_status & INTR__PROGRAM_COMP))
		return -EIO;

	return 0;
}

static int denali_pio_xfer(struct denali_nand_info *denali, void *buf,
			   size_t size, int page, int raw, int write)
{
	if (write)
		return denali_pio_write(denali, buf, size, page, raw);
	else
		return denali_pio_read(denali, buf, size, page, raw);
}

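/*
 * DMA transfer of a full page.  If the buffer cannot be mapped, fall back to
 * PIO.  The interrupt mask depends on the direction and on whether the
 * hardware ECC fixup engine is available.
 */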
static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
			   size_t size, int page, int raw, int write)
{
	dma_addr_t dma_addr;
	uint32_t irq_mask, irq_status, ecc_err_mask;
	enum dma_data_direction dir = write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	int ret = 0;

	dma_addr = dma_map_single(denali->dev, buf, size, dir);
	if (dma_mapping_error(denali->dev, dma_addr)) {
		dev_dbg(denali->dev, "Failed to DMA-map buffer. Trying PIO.\n");
		return denali_pio_xfer(denali, buf, size, page, raw, write);
	}

	if (write) {
		/*
		 * INTR__PROGRAM_COMP is never asserted for the DMA transfer.
		 * We can use INTR__DMA_CMD_COMP instead.  This flag is asserted
		 * when the page program is completed.
		 */
		irq_mask = INTR__DMA_CMD_COMP | INTR__PROGRAM_FAIL;
		ecc_err_mask = 0;
	} else if (denali->caps & DENALI_CAP_HW_ECC_FIXUP) {
		irq_mask = INTR__DMA_CMD_COMP;
		ecc_err_mask = INTR__ECC_UNCOR_ERR;
	} else {
		irq_mask = INTR__DMA_CMD_COMP;
		ecc_err_mask = INTR__ECC_ERR;
	}

	denali_enable_dma(denali, true);

	denali_reset_irq(denali);
	denali_setup_dma(denali, dma_addr, page, write);

	/* wait for operation to complete */
	irq_status = denali_wait_for_irq(denali, irq_mask);
	if (!(irq_status & INTR__DMA_CMD_COMP))
		ret = -EIO;
	else if (irq_status & ecc_err_mask)
		ret = -EBADMSG;

	denali_enable_dma(denali, false);
	dma_unmap_single(denali->dev, dma_addr, size, dir);

	if (irq_status & INTR__ERASED_PAGE)
		memset(buf, 0xff, size);

	return ret;
}

static int denali_data_xfer(struct denali_nand_info *denali, void *buf,
			    size_t size, int page, int raw, int write)
{
	setup_ecc_for_xfer(denali, !raw, raw);

	if (denali->dma_avail)
		return denali_dma_xfer(denali, buf, size, page, raw, write);
	else
		return denali_pio_xfer(denali, buf, size, page, raw, write);
}

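/*
 * Read or write only the OOB bytes of a page.  The controller stores data in
 * a syndrome layout (payload and ECC interleaved), so the BBM, the ECC bytes
 * and the free OOB bytes are picked out piecewise with random-in/out commands.
 */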
static void denali_oob_xfer(struct mtd_info *mtd, struct nand_chip *chip,
			    int page, int write)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	unsigned int start_cmd = write ? NAND_CMD_SEQIN : NAND_CMD_READ0;
	unsigned int rnd_cmd = write ? NAND_CMD_RNDIN : NAND_CMD_RNDOUT;
	int writesize = mtd->writesize;
	int oobsize = mtd->oobsize;
	uint8_t *bufpoi = chip->oob_poi;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	int oob_skip = denali->bbtskipbytes;
	size_t size = writesize + oobsize;
	int i, pos, len;

	/* BBM at the beginning of the OOB area */
	chip->cmdfunc(mtd, start_cmd, writesize, page);
	if (write)
		chip->write_buf(mtd, bufpoi, oob_skip);
	else
		chip->read_buf(mtd, bufpoi, oob_skip);
	bufpoi += oob_skip;

	/* OOB ECC */
	for (i = 0; i < ecc_steps; i++) {
		pos = ecc_size + i * (ecc_size + ecc_bytes);
		len = ecc_bytes;

		if (pos >= writesize)
			pos += oob_skip;
		else if (pos + len > writesize)
			len = writesize - pos;

		chip->cmdfunc(mtd, rnd_cmd, pos, -1);
		if (write)
			chip->write_buf(mtd, bufpoi, len);
		else
			chip->read_buf(mtd, bufpoi, len);
		bufpoi += len;
		if (len < ecc_bytes) {
			len = ecc_bytes - len;
			chip->cmdfunc(mtd, rnd_cmd, writesize + oob_skip, -1);
			if (write)
				chip->write_buf(mtd, bufpoi, len);
			else
				chip->read_buf(mtd, bufpoi, len);
			bufpoi += len;
		}
	}

	/* OOB free */
	len = oobsize - (bufpoi - chip->oob_poi);
	chip->cmdfunc(mtd, rnd_cmd, size - len, -1);
	if (write)
		chip->write_buf(mtd, bufpoi, len);
	else
		chip->read_buf(mtd, bufpoi, len);
}

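/*
 * Raw page read: transfer the whole page plus OOB with ECC disabled, then
 * untangle the controller's interleaved payload/ECC layout into the separate
 * data buffer and oob_poi layout expected by the NAND core.
 */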
static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	int writesize = mtd->writesize;
	int oobsize = mtd->oobsize;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	void *dma_buf = denali->buf;
	int oob_skip = denali->bbtskipbytes;
	size_t size = writesize + oobsize;
	int ret, i, pos, len;

	ret = denali_data_xfer(denali, dma_buf, size, page, 1, 0);
	if (ret)
		return ret;

	/* Arrange the buffer for syndrome payload/ecc layout */
	if (buf) {
		for (i = 0; i < ecc_steps; i++) {
			pos = i * (ecc_size + ecc_bytes);
			len = ecc_size;

			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(buf, dma_buf + pos, len);
			buf += len;
			if (len < ecc_size) {
				len = ecc_size - len;
				memcpy(buf, dma_buf + writesize + oob_skip,
				       len);
				buf += len;
			}
		}
	}

	if (oob_required) {
		uint8_t *oob = chip->oob_poi;

		/* BBM at the beginning of the OOB area */
		memcpy(oob, dma_buf + writesize, oob_skip);
		oob += oob_skip;

		/* OOB ECC */
		for (i = 0; i < ecc_steps; i++) {
			pos = ecc_size + i * (ecc_size + ecc_bytes);
			len = ecc_bytes;

			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(oob, dma_buf + pos, len);
			oob += len;
			if (len < ecc_bytes) {
				len = ecc_bytes - len;
				memcpy(oob, dma_buf + writesize + oob_skip,
				       len);
				oob += len;
			}
		}

		/* OOB free */
		len = oobsize - (oob - chip->oob_poi);
		memcpy(oob, dma_buf + size - len, len);
	}

	return 0;
}

static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
			   int page)
{
	denali_oob_xfer(mtd, chip, page, 0);

	return 0;
}

static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
			    int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	int status;

	denali_reset_irq(denali);

	denali_oob_xfer(mtd, chip, page, 1);

	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
	status = chip->waitfunc(mtd, chip);

	return status & NAND_STATUS_FAIL ? -EIO : 0;
}

static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
			    uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	unsigned long uncor_ecc_flags = 0;
	int stat = 0;
	int ret;

	ret = denali_data_xfer(denali, buf, mtd->writesize, page, 0, 0);
	if (ret && ret != -EBADMSG)
		return ret;

	if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
		stat = denali_hw_ecc_fixup(mtd, denali, &uncor_ecc_flags);
	else if (ret == -EBADMSG)
		stat = denali_sw_ecc_fixup(mtd, denali, &uncor_ecc_flags, buf);

	if (stat < 0)
		return stat;

	if (uncor_ecc_flags) {
		ret = denali_read_oob(mtd, chip, page);
		if (ret)
			return ret;

		stat = denali_check_erased_page(mtd, chip, buf,
						uncor_ecc_flags, stat);
	}

	return stat;
}

static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
				 const uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	int writesize = mtd->writesize;
	int oobsize = mtd->oobsize;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	void *dma_buf = denali->buf;
	int oob_skip = denali->bbtskipbytes;
	size_t size = writesize + oobsize;
	int i, pos, len;

	/*
	 * Fill the buffer with 0xff first except the full page transfer.
	 * This simplifies the logic.
	 */
	if (!buf || !oob_required)
		memset(dma_buf, 0xff, size);

	/* Arrange the buffer for syndrome payload/ecc layout */
	if (buf) {
		for (i = 0; i < ecc_steps; i++) {
			pos = i * (ecc_size + ecc_bytes);
			len = ecc_size;

			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(dma_buf + pos, buf, len);
			buf += len;
			if (len < ecc_size) {
				len = ecc_size - len;
				memcpy(dma_buf + writesize + oob_skip, buf,
				       len);
				buf += len;
			}
		}
	}

	if (oob_required) {
		const uint8_t *oob = chip->oob_poi;

		/* BBM at the beginning of the OOB area */
		memcpy(dma_buf + writesize, oob, oob_skip);
		oob += oob_skip;

		/* OOB ECC */
		for (i = 0; i < ecc_steps; i++) {
			pos = ecc_size + i * (ecc_size + ecc_bytes);
			len = ecc_bytes;

			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(dma_buf + pos, oob, len);
			oob += len;
			if (len < ecc_bytes) {
				len = ecc_bytes - len;
				memcpy(dma_buf + writesize + oob_skip, oob,
				       len);
				oob += len;
			}
		}

		/* OOB free */
		len = oobsize - (oob - chip->oob_poi);
		memcpy(dma_buf + size - len, oob, len);
	}

	return denali_data_xfer(denali, dma_buf, size, page, 1, 1);
}

static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
			     const uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	return denali_data_xfer(denali, (void *)buf, mtd->writesize,
				page, 0, 1);
}

static void denali_select_chip(struct mtd_info *mtd, int chip)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	denali->flash_bank = chip;
}

static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t irq_status;

	/* R/B# pin transitioned from low to high? */
	irq_status = denali_wait_for_irq(denali, INTR__INT_ACT);

	return irq_status & INTR__INT_ACT ? 0 : NAND_STATUS_FAIL;
}

static int denali_erase(struct mtd_info *mtd, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t cmd, irq_status;

	denali_reset_irq(denali);

	/* setup an erase operation for this page (MODE_10, op code 0x1) */
	cmd = MODE_10 | BANK(denali->flash_bank) | page;
	index_addr(denali, cmd, 0x1);

	/* wait for erase to complete or failure to occur */
	irq_status = denali_wait_for_irq(denali,
					 INTR__ERASE_COMP | INTR__ERASE_FAIL);

	return irq_status & INTR__ERASE_COMP ? 0 : NAND_STATUS_FAIL;
}

#define DIV_ROUND_DOWN_ULL(ll, d) \
	({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; })

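/*
 * Convert the SDR timing specification (in picoseconds) into bus-clock cycle
 * counts for each timing register, rounding up and clamping each value to the
 * width of its register field.
 */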
static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
				       const struct nand_data_interface *conf)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	const struct nand_sdr_timings *timings;
	unsigned long t_clk;
	int acc_clks, re_2_we, re_2_re, we_2_re, addr_2_data;
	int rdwr_en_lo, rdwr_en_hi, rdwr_en_lo_hi, cs_setup;
	int addr_2_data_mask;
	uint32_t tmp;

	timings = nand_get_sdr_timings(conf);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	/* clk_x period in picoseconds */
	t_clk = DIV_ROUND_DOWN_ULL(1000000000000ULL, denali->clk_x_rate);
	if (!t_clk)
		return -EINVAL;

	if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
		return 0;

	/* tREA -> ACC_CLKS */
	acc_clks = DIV_ROUND_UP(timings->tREA_max, t_clk);
	acc_clks = min_t(int, acc_clks, ACC_CLKS__VALUE);

	tmp = ioread32(denali->flash_reg + ACC_CLKS);
	tmp &= ~ACC_CLKS__VALUE;
	tmp |= acc_clks;
	iowrite32(tmp, denali->flash_reg + ACC_CLKS);

	/* tRWH -> RE_2_WE */
	re_2_we = DIV_ROUND_UP(timings->tRHW_min, t_clk);
	re_2_we = min_t(int, re_2_we, RE_2_WE__VALUE);

	tmp = ioread32(denali->flash_reg + RE_2_WE);
	tmp &= ~RE_2_WE__VALUE;
	tmp |= re_2_we;
	iowrite32(tmp, denali->flash_reg + RE_2_WE);

	/* tRHZ -> RE_2_RE */
	re_2_re = DIV_ROUND_UP(timings->tRHZ_max, t_clk);
	re_2_re = min_t(int, re_2_re, RE_2_RE__VALUE);

	tmp = ioread32(denali->flash_reg + RE_2_RE);
	tmp &= ~RE_2_RE__VALUE;
	tmp |= re_2_re;
	iowrite32(tmp, denali->flash_reg + RE_2_RE);

	/* tWHR -> WE_2_RE */
	we_2_re = DIV_ROUND_UP(timings->tWHR_min, t_clk);
	we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE);

	tmp = ioread32(denali->flash_reg + TWHR2_AND_WE_2_RE);
	tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE;
	tmp |= we_2_re;
	iowrite32(tmp, denali->flash_reg + TWHR2_AND_WE_2_RE);

	/* tADL -> ADDR_2_DATA */

	/* for older versions, ADDR_2_DATA is only 6 bit wide */
	addr_2_data_mask = TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
	if (denali->revision < 0x0501)
		addr_2_data_mask >>= 1;

	addr_2_data = DIV_ROUND_UP(timings->tADL_min, t_clk);
	addr_2_data = min_t(int, addr_2_data, addr_2_data_mask);

	tmp = ioread32(denali->flash_reg + TCWAW_AND_ADDR_2_DATA);
	tmp &= ~addr_2_data_mask;
	tmp |= addr_2_data;
	iowrite32(tmp, denali->flash_reg + TCWAW_AND_ADDR_2_DATA);

	/* tREH, tWH -> RDWR_EN_HI_CNT */
	rdwr_en_hi = DIV_ROUND_UP(max(timings->tREH_min, timings->tWH_min),
				  t_clk);
	rdwr_en_hi = min_t(int, rdwr_en_hi, RDWR_EN_HI_CNT__VALUE);

	tmp = ioread32(denali->flash_reg + RDWR_EN_HI_CNT);
	tmp &= ~RDWR_EN_HI_CNT__VALUE;
	tmp |= rdwr_en_hi;
	iowrite32(tmp, denali->flash_reg + RDWR_EN_HI_CNT);

	/* tRP, tWP -> RDWR_EN_LO_CNT */
	rdwr_en_lo = DIV_ROUND_UP(max(timings->tRP_min, timings->tWP_min),
				  t_clk);
	rdwr_en_lo_hi = DIV_ROUND_UP(max(timings->tRC_min, timings->tWC_min),
				     t_clk);
	rdwr_en_lo_hi = max(rdwr_en_lo_hi, DENALI_CLK_X_MULT);
	rdwr_en_lo = max(rdwr_en_lo, rdwr_en_lo_hi - rdwr_en_hi);
	rdwr_en_lo = min_t(int, rdwr_en_lo, RDWR_EN_LO_CNT__VALUE);

	tmp = ioread32(denali->flash_reg + RDWR_EN_LO_CNT);
	tmp &= ~RDWR_EN_LO_CNT__VALUE;
	tmp |= rdwr_en_lo;
	iowrite32(tmp, denali->flash_reg + RDWR_EN_LO_CNT);

	/* tCS, tCEA -> CS_SETUP_CNT */
	cs_setup = max3((int)DIV_ROUND_UP(timings->tCS_min, t_clk) - rdwr_en_lo,
			(int)DIV_ROUND_UP(timings->tCEA_max, t_clk) - acc_clks,
			0);
	cs_setup = min_t(int, cs_setup, CS_SETUP_CNT__VALUE);

	tmp = ioread32(denali->flash_reg + CS_SETUP_CNT);
	tmp &= ~CS_SETUP_CNT__VALUE;
	tmp |= cs_setup;
	iowrite32(tmp, denali->flash_reg + CS_SETUP_CNT);

	return 0;
}

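/*
 * Issue a DEVICE_RESET to each bank in turn.  The first bank that does not
 * report INT_ACT has no chip behind it, so max_banks is trimmed to the number
 * of chips actually found.
 */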
static void denali_reset_banks(struct denali_nand_info *denali)
{
	u32 irq_status;
	int i;

	for (i = 0; i < denali->max_banks; i++) {
		denali->flash_bank = i;

		denali_reset_irq(denali);

		iowrite32(DEVICE_RESET__BANK(i),
			  denali->flash_reg + DEVICE_RESET);

		irq_status = denali_wait_for_irq(denali,
			INTR__RST_COMP | INTR__INT_ACT | INTR__TIME_OUT);
		if (!(irq_status & INTR__INT_ACT))
			break;
	}

	dev_dbg(denali->dev, "%d chips connected\n", i);
	denali->max_banks = i;
}

static void denali_hw_init(struct denali_nand_info *denali)
{
	/*
	 * The REVISION register may not be reliable.  Platforms are allowed to
	 * override it.
	 */
	if (!denali->revision)
		denali->revision =
				swab16(ioread32(denali->flash_reg + REVISION));

	/*
	 * Tell the driver how many bytes the controller skips at the start of
	 * the OOB area before writing the ECC code.  This register may already
	 * have been set by firmware, so read the value back; if it is 0, just
	 * leave it as is.
	 */
	denali->bbtskipbytes = ioread32(denali->flash_reg +
						SPARE_AREA_SKIP_BYTES);
	detect_max_banks(denali);
	iowrite32(0x0F, denali->flash_reg + RB_PIN_ENABLED);
	iowrite32(CHIP_EN_DONT_CARE__FLAG,
			denali->flash_reg + CHIP_ENABLE_DONT_CARE);

	iowrite32(0xffff, denali->flash_reg + SPARE_AREA_MARKER);

	/* Should set value for these registers when init */
	iowrite32(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES);
	iowrite32(1, denali->flash_reg + ECC_ENABLE);
}

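/*
 * The controller uses a BCH code over GF(2^m) with m = fls(step_size * 8),
 * so each bit of correction strength costs m parity bits; the total is
 * rounded up to a whole number of 16-bit words, i.e. an even byte count.
 */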
int denali_calc_ecc_bytes(int step_size, int strength)
{
	/* BCH code.  Denali requires ecc.bytes to be multiple of 2 */
	return DIV_ROUND_UP(strength * fls(step_size * 8), 16) * 2;
}
EXPORT_SYMBOL(denali_calc_ecc_bytes);

static int denali_ecc_setup(struct mtd_info *mtd, struct nand_chip *chip,
			    struct denali_nand_info *denali)
{
	int oobavail = mtd->oobsize - denali->bbtskipbytes;
	int ret;

	/*
	 * If .size and .strength are already set (usually by DT),
	 * check if they are supported by this controller.
	 */
	if (chip->ecc.size && chip->ecc.strength)
		return nand_check_ecc_caps(chip, denali->ecc_caps, oobavail);

	/*
	 * We want .size and .strength closest to the chip's requirement
	 * unless NAND_ECC_MAXIMIZE is requested.
	 */
	if (!(chip->ecc.options & NAND_ECC_MAXIMIZE)) {
		ret = nand_match_ecc_req(chip, denali->ecc_caps, oobavail);
		if (!ret)
			return 0;
	}

	/* Max ECC strength is the last thing we can do */
	return nand_maximize_ecc(chip, denali->ecc_caps, oobavail);
}

static int denali_ooblayout_ecc(struct mtd_info *mtd, int section,
				struct mtd_oob_region *oobregion)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section)
		return -ERANGE;

	oobregion->offset = denali->bbtskipbytes;
	oobregion->length = chip->ecc.total;

	return 0;
}

static int denali_ooblayout_free(struct mtd_info *mtd, int section,
				 struct mtd_oob_region *oobregion)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section)
		return -ERANGE;

	oobregion->offset = chip->ecc.total + denali->bbtskipbytes;
	oobregion->length = mtd->oobsize - oobregion->offset;

	return 0;
}

static const struct mtd_ooblayout_ops denali_ooblayout_ops = {
	.ecc = denali_ooblayout_ecc,
	.free = denali_ooblayout_free,
};

static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };

static struct nand_bbt_descr bbt_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
	.offs = 8,
	.len = 4,
	.veroffs = 12,
	.maxblocks = 4,
	.pattern = bbt_pattern,
};

static struct nand_bbt_descr bbt_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
	.offs = 8,
	.len = 4,
	.veroffs = 12,
	.maxblocks = 4,
	.pattern = mirror_pattern,
};

/* initialize driver data structures */
static void denali_drv_init(struct denali_nand_info *denali)
{
	/*
	 * the completion object is used by the ISR to notify a waiter
	 * that the expected interrupt has arrived
	 */
	init_completion(&denali->complete);

	/*
	 * the spinlock is used to synchronize the ISR with any code that
	 * accesses the shared data (interrupt status)
	 */
	spin_lock_init(&denali->irq_lock);
}

static int denali_multidev_fixup(struct denali_nand_info *denali)
{
	struct nand_chip *chip = &denali->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);

	/*
	 * Support for multi device:
	 * When the IP configuration is x16 capable and two x8 chips are
	 * connected in parallel, DEVICES_CONNECTED should be set to 2.
	 * In this case, the core framework knows nothing about this fact,
	 * so we should tell it the _logical_ pagesize and anything necessary.
	 */
	denali->devnum = ioread32(denali->flash_reg + DEVICES_CONNECTED);

	/*
	 * On some SoCs, DEVICES_CONNECTED is not auto-detected.
	 * For those, DEVICES_CONNECTED is left to 0.  Set 1 if it is the case.
	 */
	if (denali->devnum == 0) {
		denali->devnum = 1;
		iowrite32(1, denali->flash_reg + DEVICES_CONNECTED);
	}

	if (denali->devnum == 1)
		return 0;

	if (denali->devnum != 2) {
		dev_err(denali->dev, "unsupported number of devices %d\n",
			denali->devnum);
		return -EINVAL;
	}

	/* 2 chips in parallel */
	mtd->size <<= 1;
	mtd->erasesize <<= 1;
	mtd->writesize <<= 1;
	mtd->oobsize <<= 1;
	chip->chipsize <<= 1;
	chip->page_shift += 1;
	chip->phys_erase_shift += 1;
	chip->bbt_erase_shift += 1;
	chip->chip_shift += 1;
	chip->pagemask <<= 1;
	chip->ecc.size <<= 1;
	chip->ecc.bytes <<= 1;
	chip->ecc.strength <<= 1;
	denali->bbtskipbytes <<= 1;

	return 0;
}

int denali_init(struct denali_nand_info *denali)
{
	struct nand_chip *chip = &denali->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	mtd->dev.parent = denali->dev;
	denali_hw_init(denali);
	denali_drv_init(denali);

	denali_clear_irq_all(denali);

	/* Request IRQ after all the hardware initialization is finished */
	ret = devm_request_irq(denali->dev, denali->irq, denali_isr,
			       IRQF_SHARED, DENALI_NAND_NAME, denali);
	if (ret) {
		dev_err(denali->dev, "Unable to request IRQ\n");
		return ret;
	}

	denali_enable_irq(denali);
	denali_reset_banks(denali);

	denali->flash_bank = CHIP_SELECT_INVALID;

	nand_set_flash_node(chip, denali->dev->of_node);
	/* Fallback to the default name if DT did not give "label" property */
	if (!mtd->name)
		mtd->name = "denali-nand";

	/* register the driver with the NAND core subsystem */
	chip->select_chip = denali_select_chip;
	chip->read_byte = denali_read_byte;
	chip->write_byte = denali_write_byte;
	chip->read_word = denali_read_word;
	chip->cmd_ctrl = denali_cmd_ctrl;
	chip->dev_ready = denali_dev_ready;
	chip->waitfunc = denali_waitfunc;

	/* clk rate info is needed for setup_data_interface */
	if (denali->clk_x_rate)
		chip->setup_data_interface = denali_setup_data_interface;

	/*
	 * scan for NAND devices attached to the controller
	 * this is the first stage in a two step process to register
	 * with the nand subsystem
	 */
	ret = nand_scan_ident(mtd, denali->max_banks, NULL);
	if (ret)
		goto disable_irq;

	denali->buf = devm_kzalloc(denali->dev, mtd->writesize + mtd->oobsize,
				   GFP_KERNEL);
	if (!denali->buf) {
		ret = -ENOMEM;
		goto disable_irq;
	}

	if (ioread32(denali->flash_reg + FEATURES) & FEATURES__DMA)
		denali->dma_avail = 1;

	if (denali->dma_avail) {
		int dma_bit = denali->caps & DENALI_CAP_DMA_64BIT ? 64 : 32;

		ret = dma_set_mask(denali->dev, DMA_BIT_MASK(dma_bit));
		if (ret) {
			dev_info(denali->dev,
				 "Failed to set DMA mask. Disabling DMA.\n");
			denali->dma_avail = 0;
		}
	}

	if (denali->dma_avail) {
		chip->options |= NAND_USE_BOUNCE_BUFFER;
		chip->buf_align = 16;
	}

	/*
	 * second stage of the NAND scan
	 * this stage requires information regarding ECC and
	 * bad block management.
	 */

	/* Bad block management */
	chip->bbt_td = &bbt_main_descr;
	chip->bbt_md = &bbt_mirror_descr;

	/* skip the scan for now until we have OOB read and write support */
	chip->bbt_options |= NAND_BBT_USE_FLASH;
	chip->options |= NAND_SKIP_BBTSCAN;
	chip->ecc.mode = NAND_ECC_HW_SYNDROME;

	/* no subpage writes on denali */
	chip->options |= NAND_NO_SUBPAGE_WRITE;

	ret = denali_ecc_setup(mtd, chip, denali);
	if (ret) {
		dev_err(denali->dev, "Failed to setup ECC settings.\n");
		goto disable_irq;
	}

	dev_dbg(denali->dev,
		"chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
		chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);

	iowrite32(MAKE_ECC_CORRECTION(chip->ecc.strength, 1),
		  denali->flash_reg + ECC_CORRECTION);
	iowrite32(mtd->erasesize / mtd->writesize,
		  denali->flash_reg + PAGES_PER_BLOCK);
	iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0,
		  denali->flash_reg + DEVICE_WIDTH);
	iowrite32(mtd->writesize, denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
	iowrite32(mtd->oobsize, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);

	iowrite32(chip->ecc.size, denali->flash_reg + CFG_DATA_BLOCK_SIZE);
	iowrite32(chip->ecc.size, denali->flash_reg + CFG_LAST_DATA_BLOCK_SIZE);
	/* chip->ecc.steps is set by nand_scan_tail(); not available here */
	iowrite32(mtd->writesize / chip->ecc.size,
		  denali->flash_reg + CFG_NUM_DATA_BLOCKS);

	mtd_set_ooblayout(mtd, &denali_ooblayout_ops);

	if (chip->options & NAND_BUSWIDTH_16) {
		chip->read_buf = denali_read_buf16;
		chip->write_buf = denali_write_buf16;
	} else {
		chip->read_buf = denali_read_buf;
		chip->write_buf = denali_write_buf;
	}
	chip->ecc.options |= NAND_ECC_CUSTOM_PAGE_ACCESS;
	chip->ecc.read_page = denali_read_page;
	chip->ecc.read_page_raw = denali_read_page_raw;
	chip->ecc.write_page = denali_write_page;
	chip->ecc.write_page_raw = denali_write_page_raw;
	chip->ecc.read_oob = denali_read_oob;
	chip->ecc.write_oob = denali_write_oob;
	chip->erase = denali_erase;

	ret = denali_multidev_fixup(denali);
	if (ret)
		goto disable_irq;

	ret = nand_scan_tail(mtd);
	if (ret)
		goto disable_irq;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret) {
		dev_err(denali->dev, "Failed to register MTD: %d\n", ret);
		goto disable_irq;
	}
	return 0;

disable_irq:
	denali_disable_irq(denali);

	return ret;
}
EXPORT_SYMBOL(denali_init);

/* driver exit point */
void denali_remove(struct denali_nand_info *denali)
{
	struct mtd_info *mtd = nand_to_mtd(&denali->nand);

	nand_release(mtd);
	denali_disable_irq(denali);
}
EXPORT_SYMBOL(denali_remove);