/*
 * NAND Flash Controller Device Driver
 * Copyright © 2009-2010, Intel Corporation and its suppliers.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/mtd/mtd.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "denali.h"

MODULE_LICENSE("GPL");

#define DENALI_NAND_NAME    "denali-nand"

/*
 * indicates whether the driver's internal record of the selected
 * flash bank is valid
 */
#define CHIP_SELECT_INVALID	-1

#define DENALI_NR_BANKS		4

Masahiro Yamada43914a22014-09-09 11:01:51 +090042/*
Masahiro Yamada1bb88662017-06-13 22:45:37 +090043 * The bus interface clock, clk_x, is phase aligned with the core clock. The
44 * clk_x is an integral multiple N of the core clk. The value N is configured
45 * at IP delivery time, and its available value is 4, 5, or 6. We need to align
46 * to the largest value to make it work with any possible configuration.
Masahiro Yamada43914a22014-09-09 11:01:51 +090047 */
Masahiro Yamada1bb88662017-06-13 22:45:37 +090048#define DENALI_CLK_X_MULT 6
Jason Robertsce082592010-05-13 15:57:33 +010049
/*
 * This helper converts from an MTD structure to our own
 * device context (denali) structure.
 */
static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd)
{
        return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand);
}

/*
 * This is a helper macro that formats the bank number into the
 * proper bit field expected by the controller.
 */
#define BANK(x) ((x) << 24)

/*
 * Certain operations for the denali NAND controller use an indexed mode to
 * read/write data.  The operation is performed by writing the address value
 * of the command to the device memory followed by the data.  This function
 * abstracts this common operation.
 */
static void index_addr(struct denali_nand_info *denali,
                       uint32_t address, uint32_t data)
{
        iowrite32(address, denali->flash_mem);
        iowrite32(data, denali->flash_mem + 0x10);
}

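/*
 * For example, denali_cmd_ctrl() below uses index_addr() in MODE_11 to issue
 * command (type 0) and address (type 1) cycles, and denali_setup_dma32() uses
 * a sequence of MODE_10 index_addr() calls to program the DMA engine.
 */
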
Masahiro Yamada43914a22014-09-09 11:01:51 +090078/*
Jamie Ilesc89eeda2011-05-06 15:28:57 +010079 * Use the configuration feature register to determine the maximum number of
80 * banks that the hardware supports.
81 */
82static void detect_max_banks(struct denali_nand_info *denali)
83{
84 uint32_t features = ioread32(denali->flash_reg + FEATURES);
85
Masahiro Yamadae7beeee2017-03-30 15:45:57 +090086 denali->max_banks = 1 << (features & FEATURES__N_BANKS);
87
88 /* the encoding changed from rev 5.0 to 5.1 */
89 if (denali->revision < 0x0501)
90 denali->max_banks <<= 1;
Jamie Ilesc89eeda2011-05-06 15:28:57 +010091}
92
static void denali_enable_irq(struct denali_nand_info *denali)
{
        int i;

        for (i = 0; i < DENALI_NR_BANKS; i++)
                iowrite32(U32_MAX, denali->flash_reg + INTR_EN(i));
        iowrite32(GLOBAL_INT_EN_FLAG, denali->flash_reg + GLOBAL_INT_ENABLE);
}

static void denali_disable_irq(struct denali_nand_info *denali)
{
        int i;

        for (i = 0; i < DENALI_NR_BANKS; i++)
                iowrite32(0, denali->flash_reg + INTR_EN(i));
        iowrite32(0, denali->flash_reg + GLOBAL_INT_ENABLE);
}

static void denali_clear_irq(struct denali_nand_info *denali,
                             int bank, uint32_t irq_status)
{
        /* write one to clear bits */
        iowrite32(irq_status, denali->flash_reg + INTR_STATUS(bank));
}

static void denali_clear_irq_all(struct denali_nand_info *denali)
{
        int i;

        for (i = 0; i < DENALI_NR_BANKS; i++)
                denali_clear_irq(denali, i, U32_MAX);
}

static irqreturn_t denali_isr(int irq, void *dev_id)
{
        struct denali_nand_info *denali = dev_id;
        irqreturn_t ret = IRQ_NONE;
        uint32_t irq_status;
        int i;

        spin_lock(&denali->irq_lock);

        for (i = 0; i < DENALI_NR_BANKS; i++) {
                irq_status = ioread32(denali->flash_reg + INTR_STATUS(i));
                if (irq_status)
                        ret = IRQ_HANDLED;

                denali_clear_irq(denali, i, irq_status);

                if (i != denali->flash_bank)
                        continue;

                denali->irq_status |= irq_status;

                if (denali->irq_status & denali->irq_mask)
                        complete(&denali->complete);
        }

        spin_unlock(&denali->irq_lock);

        return ret;
}

static void denali_reset_irq(struct denali_nand_info *denali)
{
        unsigned long flags;

        spin_lock_irqsave(&denali->irq_lock, flags);
        denali->irq_status = 0;
        denali->irq_mask = 0;
        spin_unlock_irqrestore(&denali->irq_lock, flags);
}

static uint32_t denali_wait_for_irq(struct denali_nand_info *denali,
                                    uint32_t irq_mask)
{
        unsigned long time_left, flags;
        uint32_t irq_status;

        spin_lock_irqsave(&denali->irq_lock, flags);

        irq_status = denali->irq_status;

        if (irq_mask & irq_status) {
                /* return immediately if the IRQ has already happened. */
                spin_unlock_irqrestore(&denali->irq_lock, flags);
                return irq_status;
        }

        denali->irq_mask = irq_mask;
        reinit_completion(&denali->complete);
        spin_unlock_irqrestore(&denali->irq_lock, flags);

        time_left = wait_for_completion_timeout(&denali->complete,
                                                msecs_to_jiffies(1000));
        if (!time_left) {
                dev_err(denali->dev, "timeout while waiting for irq 0x%x\n",
                        denali->irq_mask);
                return 0;
        }

        return denali->irq_status;
}

static uint32_t denali_check_irq(struct denali_nand_info *denali)
{
        unsigned long flags;
        uint32_t irq_status;

        spin_lock_irqsave(&denali->irq_lock, flags);
        irq_status = denali->irq_status;
        spin_unlock_irqrestore(&denali->irq_lock, flags);

        return irq_status;
}

/*
 * This helper function sets up the registers for ECC and for whether
 * the spare area will be transferred.
 */
static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en,
                               bool transfer_spare)
{
        int ecc_en_flag, transfer_spare_flag;

        /* set ECC, transfer spare bits if needed */
        ecc_en_flag = ecc_en ? ECC_ENABLE__FLAG : 0;
        transfer_spare_flag = transfer_spare ? TRANSFER_SPARE_REG__FLAG : 0;

        /* Enable spare area/ECC per user's request. */
        iowrite32(ecc_en_flag, denali->flash_reg + ECC_ENABLE);
        iowrite32(transfer_spare_flag, denali->flash_reg + TRANSFER_SPARE_REG);
}

static void denali_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
        struct denali_nand_info *denali = mtd_to_denali(mtd);
        int i;

        iowrite32(MODE_11 | BANK(denali->flash_bank) | 2, denali->flash_mem);

        for (i = 0; i < len; i++)
                buf[i] = ioread32(denali->flash_mem + 0x10);
}

static void denali_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
{
        struct denali_nand_info *denali = mtd_to_denali(mtd);
        int i;

        iowrite32(MODE_11 | BANK(denali->flash_bank) | 2, denali->flash_mem);

        for (i = 0; i < len; i++)
                iowrite32(buf[i], denali->flash_mem + 0x10);
}

static void denali_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
{
        struct denali_nand_info *denali = mtd_to_denali(mtd);
        uint16_t *buf16 = (uint16_t *)buf;
        int i;

        iowrite32(MODE_11 | BANK(denali->flash_bank) | 2, denali->flash_mem);

        for (i = 0; i < len / 2; i++)
                buf16[i] = ioread32(denali->flash_mem + 0x10);
}

static void denali_write_buf16(struct mtd_info *mtd, const uint8_t *buf,
                               int len)
{
        struct denali_nand_info *denali = mtd_to_denali(mtd);
        const uint16_t *buf16 = (const uint16_t *)buf;
        int i;

        iowrite32(MODE_11 | BANK(denali->flash_bank) | 2, denali->flash_mem);

        for (i = 0; i < len / 2; i++)
                iowrite32(buf16[i], denali->flash_mem + 0x10);
}

static uint8_t denali_read_byte(struct mtd_info *mtd)
{
        uint8_t byte;

        denali_read_buf(mtd, &byte, 1);

        return byte;
}

static void denali_write_byte(struct mtd_info *mtd, uint8_t byte)
{
        denali_write_buf(mtd, &byte, 1);
}

static uint16_t denali_read_word(struct mtd_info *mtd)
{
        uint16_t word;

        denali_read_buf16(mtd, (uint8_t *)&word, 2);

        return word;
}

static void denali_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
{
        struct denali_nand_info *denali = mtd_to_denali(mtd);
        uint32_t type;

        if (ctrl & NAND_CLE)
                type = 0;
        else if (ctrl & NAND_ALE)
                type = 1;
        else
                return;

        /*
         * Some commands are followed by chip->dev_ready or chip->waitfunc.
         * irq_status must be cleared here to catch the R/B# interrupt later.
         */
        if (ctrl & NAND_CTRL_CHANGE)
                denali_reset_irq(denali);

        index_addr(denali, MODE_11 | BANK(denali->flash_bank) | type, dat);
}

static int denali_dev_ready(struct mtd_info *mtd)
{
        struct denali_nand_info *denali = mtd_to_denali(mtd);

        return !!(denali_check_irq(denali) & INTR__INT_ACT);
}

static int denali_check_erased_page(struct mtd_info *mtd,
                                    struct nand_chip *chip, uint8_t *buf,
                                    unsigned long uncor_ecc_flags,
                                    unsigned int max_bitflips)
{
        uint8_t *ecc_code = chip->buffers->ecccode;
        int ecc_steps = chip->ecc.steps;
        int ecc_size = chip->ecc.size;
        int ecc_bytes = chip->ecc.bytes;
        int i, ret, stat;

        ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
                                         chip->ecc.total);
        if (ret)
                return ret;

        for (i = 0; i < ecc_steps; i++) {
                if (!(uncor_ecc_flags & BIT(i)))
                        continue;

                stat = nand_check_erased_ecc_chunk(buf, ecc_size,
                                                   ecc_code, ecc_bytes,
                                                   NULL, 0,
                                                   chip->ecc.strength);
                if (stat < 0) {
                        mtd->ecc_stats.failed++;
                } else {
                        mtd->ecc_stats.corrected += stat;
                        max_bitflips = max_t(unsigned int, max_bitflips, stat);
                }

                buf += ecc_size;
                ecc_code += ecc_bytes;
        }

        return max_bitflips;
}

static int denali_hw_ecc_fixup(struct mtd_info *mtd,
                               struct denali_nand_info *denali,
                               unsigned long *uncor_ecc_flags)
{
        struct nand_chip *chip = mtd_to_nand(mtd);
        int bank = denali->flash_bank;
        uint32_t ecc_cor;
        unsigned int max_bitflips;

        ecc_cor = ioread32(denali->flash_reg + ECC_COR_INFO(bank));
        ecc_cor >>= ECC_COR_INFO__SHIFT(bank);

        if (ecc_cor & ECC_COR_INFO__UNCOR_ERR) {
                /*
                 * This flag is set when an uncorrectable error occurs in at
                 * least one ECC sector.  We can not know how many sectors, or
                 * which sector(s), so the erased-page check must cover all
                 * sectors.
                 */
                *uncor_ecc_flags = GENMASK(chip->ecc.steps - 1, 0);
                return 0;
        }

        max_bitflips = ecc_cor & ECC_COR_INFO__MAX_ERRORS;

        /*
         * The register holds the maximum of per-sector corrected bitflips.
         * This is suitable for the return value of the ->read_page() callback.
         * Unfortunately, we can not know the total number of corrected bits in
         * the page, so increase the stats by max_bitflips as a compromise.
         */
        mtd->ecc_stats.corrected += max_bitflips;

        return max_bitflips;
}

#define ECC_SECTOR(x)	(((x) & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12)
#define ECC_BYTE(x)	(((x) & ECC_ERROR_ADDRESS__OFFSET))
#define ECC_CORRECTION_VALUE(x) ((x) & ERR_CORRECTION_INFO__BYTEMASK)
#define ECC_ERROR_UNCORRECTABLE(x) ((x) & ERR_CORRECTION_INFO__ERROR_TYPE)
#define ECC_ERR_DEVICE(x)	(((x) & ERR_CORRECTION_INFO__DEVICE_NR) >> 8)
#define ECC_LAST_ERR(x)		((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO)

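/*
 * For example, assuming the usual field layout (sector number in bits 15:12
 * and byte offset in bits 11:0 of ECC_ERROR_ADDRESS), an err_addr value of
 * 0x3012 decodes to ECC sector 3, byte offset 0x12 within that sector.
 */
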
static int denali_sw_ecc_fixup(struct mtd_info *mtd,
                               struct denali_nand_info *denali,
                               unsigned long *uncor_ecc_flags, uint8_t *buf)
{
        unsigned int ecc_size = denali->nand.ecc.size;
        unsigned int bitflips = 0;
        unsigned int max_bitflips = 0;
        uint32_t err_addr, err_cor_info;
        unsigned int err_byte, err_sector, err_device;
        uint8_t err_cor_value;
        unsigned int prev_sector = 0;
        uint32_t irq_status;

        denali_reset_irq(denali);

        do {
                err_addr = ioread32(denali->flash_reg + ECC_ERROR_ADDRESS);
                err_sector = ECC_SECTOR(err_addr);
                err_byte = ECC_BYTE(err_addr);

                err_cor_info = ioread32(denali->flash_reg + ERR_CORRECTION_INFO);
                err_cor_value = ECC_CORRECTION_VALUE(err_cor_info);
                err_device = ECC_ERR_DEVICE(err_cor_info);

                /* reset the bitflip counter when crossing ECC sector */
                if (err_sector != prev_sector)
                        bitflips = 0;

                if (ECC_ERROR_UNCORRECTABLE(err_cor_info)) {
                        /*
                         * Check later if this is a real ECC error, or
                         * an erased sector.
                         */
                        *uncor_ecc_flags |= BIT(err_sector);
                } else if (err_byte < ecc_size) {
                        /*
                         * If err_byte is not smaller than ecc_size, the error
                         * happened in the OOB area, so we ignore it; there is
                         * no need to correct it.  err_device identifies which
                         * NAND device the error bits belong to when more than
                         * one NAND chip is connected in parallel.
                         */
                        int offset;
                        unsigned int flips_in_byte;

                        offset = (err_sector * ecc_size + err_byte) *
                                        denali->devnum + err_device;

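                        /*
                         * For example, with two x8 chips in parallel
                         * (devnum = 2) and ecc_size = 512, an error reported
                         * at sector 1, byte 3 on device 1 maps to buffer
                         * offset (1 * 512 + 3) * 2 + 1 = 1031.
                         */
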
                        /* correct the ECC error */
                        flips_in_byte = hweight8(buf[offset] ^ err_cor_value);
                        buf[offset] ^= err_cor_value;
                        mtd->ecc_stats.corrected += flips_in_byte;
                        bitflips += flips_in_byte;

                        max_bitflips = max(max_bitflips, bitflips);
                }

                prev_sector = err_sector;
        } while (!ECC_LAST_ERR(err_cor_info));

        /*
         * Once all ECC errors have been handled, the controller triggers an
         * ECC_TRANSACTION_DONE interrupt, so just wait for it here.
         */
        irq_status = denali_wait_for_irq(denali, INTR__ECC_TRANSACTION_DONE);
        if (!(irq_status & INTR__ECC_TRANSACTION_DONE))
                return -EIO;

        return max_bitflips;
}

/* program the controller to enable or disable DMA transfers */
static void denali_enable_dma(struct denali_nand_info *denali, bool en)
{
        iowrite32(en ? DMA_ENABLE__FLAG : 0, denali->flash_reg + DMA_ENABLE);
        ioread32(denali->flash_reg + DMA_ENABLE);
}

static void denali_setup_dma64(struct denali_nand_info *denali,
                               dma_addr_t dma_addr, int page, int write)
{
        uint32_t mode;
        const int page_count = 1;

        mode = MODE_10 | BANK(denali->flash_bank) | page;

        /* DMA is a three step process */

        /*
         * 1. setup transfer type, interrupt when complete,
         *    burst len = 64 bytes, the number of pages
         */
        index_addr(denali, mode,
                   0x01002000 | (64 << 16) | (write << 8) | page_count);

        /* 2. set memory low address */
        index_addr(denali, mode, dma_addr);

        /* 3. set memory high address */
        index_addr(denali, mode, (uint64_t)dma_addr >> 32);
}

static void denali_setup_dma32(struct denali_nand_info *denali,
                               dma_addr_t dma_addr, int page, int write)
{
        uint32_t mode;
        const int page_count = 1;

        mode = MODE_10 | BANK(denali->flash_bank);

        /* DMA is a four step process */

        /* 1. setup transfer type and # of pages */
        index_addr(denali, mode | page, 0x2000 | (write << 8) | page_count);

        /* 2. set memory high address bits 23:8 */
        index_addr(denali, mode | ((dma_addr >> 16) << 8), 0x2200);

        /* 3. set memory low address bits 23:8 */
        index_addr(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300);

        /* 4. interrupt when complete, burst len = 64 bytes */
        index_addr(denali, mode | 0x14000, 0x2400);
}

static void denali_setup_dma(struct denali_nand_info *denali,
                             dma_addr_t dma_addr, int page, int write)
{
        if (denali->caps & DENALI_CAP_DMA_64BIT)
                denali_setup_dma64(denali, dma_addr, page, write);
        else
                denali_setup_dma32(denali, dma_addr, page, write);
}

static int denali_pio_read(struct denali_nand_info *denali, void *buf,
                           size_t size, int page, int raw)
{
        uint32_t addr = BANK(denali->flash_bank) | page;
        uint32_t *buf32 = (uint32_t *)buf;
        uint32_t irq_status, ecc_err_mask;
        int i;

        if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
                ecc_err_mask = INTR__ECC_UNCOR_ERR;
        else
                ecc_err_mask = INTR__ECC_ERR;

        denali_reset_irq(denali);

        iowrite32(MODE_01 | addr, denali->flash_mem);
        for (i = 0; i < size / 4; i++)
                *buf32++ = ioread32(denali->flash_mem + 0x10);

        irq_status = denali_wait_for_irq(denali, INTR__PAGE_XFER_INC);
        if (!(irq_status & INTR__PAGE_XFER_INC))
                return -EIO;

        if (irq_status & INTR__ERASED_PAGE)
                memset(buf, 0xff, size);

        return irq_status & ecc_err_mask ? -EBADMSG : 0;
}

static int denali_pio_write(struct denali_nand_info *denali,
                            const void *buf, size_t size, int page, int raw)
{
        uint32_t addr = BANK(denali->flash_bank) | page;
        const uint32_t *buf32 = (uint32_t *)buf;
        uint32_t irq_status;
        int i;

        denali_reset_irq(denali);

        iowrite32(MODE_01 | addr, denali->flash_mem);
        for (i = 0; i < size / 4; i++)
                iowrite32(*buf32++, denali->flash_mem + 0x10);

        irq_status = denali_wait_for_irq(denali,
                                INTR__PROGRAM_COMP | INTR__PROGRAM_FAIL);
        if (!(irq_status & INTR__PROGRAM_COMP))
                return -EIO;

        return 0;
}

static int denali_pio_xfer(struct denali_nand_info *denali, void *buf,
                           size_t size, int page, int raw, int write)
{
        if (write)
                return denali_pio_write(denali, buf, size, page, raw);
        else
                return denali_pio_read(denali, buf, size, page, raw);
}

static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
                           size_t size, int page, int raw, int write)
{
        dma_addr_t dma_addr;
        uint32_t irq_mask, irq_status, ecc_err_mask;
        enum dma_data_direction dir = write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
        int ret = 0;

        dma_addr = dma_map_single(denali->dev, buf, size, dir);
        if (dma_mapping_error(denali->dev, dma_addr)) {
                dev_dbg(denali->dev, "Failed to DMA-map buffer. Trying PIO.\n");
                return denali_pio_xfer(denali, buf, size, page, raw, write);
        }

        if (write) {
                /*
                 * INTR__PROGRAM_COMP is never asserted for the DMA transfer.
                 * We can use INTR__DMA_CMD_COMP instead.  This flag is
                 * asserted when the page program is completed.
                 */
                irq_mask = INTR__DMA_CMD_COMP | INTR__PROGRAM_FAIL;
                ecc_err_mask = 0;
        } else if (denali->caps & DENALI_CAP_HW_ECC_FIXUP) {
                irq_mask = INTR__DMA_CMD_COMP;
                ecc_err_mask = INTR__ECC_UNCOR_ERR;
        } else {
                irq_mask = INTR__DMA_CMD_COMP;
                ecc_err_mask = INTR__ECC_ERR;
        }

        denali_enable_dma(denali, true);

        denali_reset_irq(denali);
        denali_setup_dma(denali, dma_addr, page, write);

        /* wait for operation to complete */
        irq_status = denali_wait_for_irq(denali, irq_mask);
        if (!(irq_status & INTR__DMA_CMD_COMP))
                ret = -EIO;
        else if (irq_status & ecc_err_mask)
                ret = -EBADMSG;

        denali_enable_dma(denali, false);
        dma_unmap_single(denali->dev, dma_addr, size, dir);

        if (irq_status & INTR__ERASED_PAGE)
                memset(buf, 0xff, size);

        return ret;
}

static int denali_data_xfer(struct denali_nand_info *denali, void *buf,
                            size_t size, int page, int raw, int write)
{
        setup_ecc_for_xfer(denali, !raw, raw);

        if (denali->dma_avail)
                return denali_dma_xfer(denali, buf, size, page, raw, write);
        else
                return denali_pio_xfer(denali, buf, size, page, raw, write);
}

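/*
 * In the raw view, the page (writesize + oobsize bytes) interleaves payload
 * and ECC in ecc.size + ecc.bytes chunks, and the first bbtskipbytes of the
 * OOB area (starting right at writesize) are reserved for the bad block
 * marker.  As an example, with a 2048+64 page, ecc.size = 512, ecc.bytes = 14
 * and 2 skip bytes, the fourth chunk's payload would run up to offset 2089,
 * so the bytes that would land at offset 2048 and beyond are pushed past the
 * 2-byte BBM area; the helpers below implement exactly this shifting.
 */
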
static void denali_oob_xfer(struct mtd_info *mtd, struct nand_chip *chip,
                            int page, int write)
{
        struct denali_nand_info *denali = mtd_to_denali(mtd);
        unsigned int start_cmd = write ? NAND_CMD_SEQIN : NAND_CMD_READ0;
        unsigned int rnd_cmd = write ? NAND_CMD_RNDIN : NAND_CMD_RNDOUT;
        int writesize = mtd->writesize;
        int oobsize = mtd->oobsize;
        uint8_t *bufpoi = chip->oob_poi;
        int ecc_steps = chip->ecc.steps;
        int ecc_size = chip->ecc.size;
        int ecc_bytes = chip->ecc.bytes;
        int oob_skip = denali->bbtskipbytes;
        size_t size = writesize + oobsize;
        int i, pos, len;

        /* BBM at the beginning of the OOB area */
        chip->cmdfunc(mtd, start_cmd, writesize, page);
        if (write)
                chip->write_buf(mtd, bufpoi, oob_skip);
        else
                chip->read_buf(mtd, bufpoi, oob_skip);
        bufpoi += oob_skip;

        /* OOB ECC */
        for (i = 0; i < ecc_steps; i++) {
                pos = ecc_size + i * (ecc_size + ecc_bytes);
                len = ecc_bytes;

                if (pos >= writesize)
                        pos += oob_skip;
                else if (pos + len > writesize)
                        len = writesize - pos;

                chip->cmdfunc(mtd, rnd_cmd, pos, -1);
                if (write)
                        chip->write_buf(mtd, bufpoi, len);
                else
                        chip->read_buf(mtd, bufpoi, len);
                bufpoi += len;
                if (len < ecc_bytes) {
                        len = ecc_bytes - len;
                        chip->cmdfunc(mtd, rnd_cmd, writesize + oob_skip, -1);
                        if (write)
                                chip->write_buf(mtd, bufpoi, len);
                        else
                                chip->read_buf(mtd, bufpoi, len);
                        bufpoi += len;
                }
        }

        /* OOB free */
        len = oobsize - (bufpoi - chip->oob_poi);
        chip->cmdfunc(mtd, rnd_cmd, size - len, -1);
        if (write)
                chip->write_buf(mtd, bufpoi, len);
        else
                chip->read_buf(mtd, bufpoi, len);
}

static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
                                uint8_t *buf, int oob_required, int page)
{
        struct denali_nand_info *denali = mtd_to_denali(mtd);
        int writesize = mtd->writesize;
        int oobsize = mtd->oobsize;
        int ecc_steps = chip->ecc.steps;
        int ecc_size = chip->ecc.size;
        int ecc_bytes = chip->ecc.bytes;
        void *dma_buf = denali->buf;
        int oob_skip = denali->bbtskipbytes;
        size_t size = writesize + oobsize;
        int ret, i, pos, len;

        ret = denali_data_xfer(denali, dma_buf, size, page, 1, 0);
        if (ret)
                return ret;

        /* Arrange the buffer for syndrome payload/ecc layout */
        if (buf) {
                for (i = 0; i < ecc_steps; i++) {
                        pos = i * (ecc_size + ecc_bytes);
                        len = ecc_size;

                        if (pos >= writesize)
                                pos += oob_skip;
                        else if (pos + len > writesize)
                                len = writesize - pos;

                        memcpy(buf, dma_buf + pos, len);
                        buf += len;
                        if (len < ecc_size) {
                                len = ecc_size - len;
                                memcpy(buf, dma_buf + writesize + oob_skip,
                                       len);
                                buf += len;
                        }
                }
        }

        if (oob_required) {
                uint8_t *oob = chip->oob_poi;

                /* BBM at the beginning of the OOB area */
                memcpy(oob, dma_buf + writesize, oob_skip);
                oob += oob_skip;

                /* OOB ECC */
                for (i = 0; i < ecc_steps; i++) {
                        pos = ecc_size + i * (ecc_size + ecc_bytes);
                        len = ecc_bytes;

                        if (pos >= writesize)
                                pos += oob_skip;
                        else if (pos + len > writesize)
                                len = writesize - pos;

                        memcpy(oob, dma_buf + pos, len);
                        oob += len;
                        if (len < ecc_bytes) {
                                len = ecc_bytes - len;
                                memcpy(oob, dma_buf + writesize + oob_skip,
                                       len);
                                oob += len;
                        }
                }

                /* OOB free */
                len = oobsize - (oob - chip->oob_poi);
                memcpy(oob, dma_buf + size - len, len);
        }

        return 0;
}

static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
                           int page)
{
        denali_oob_xfer(mtd, chip, page, 0);

        return 0;
}

static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
                            int page)
{
        struct denali_nand_info *denali = mtd_to_denali(mtd);
        int status;

        denali_reset_irq(denali);

        denali_oob_xfer(mtd, chip, page, 1);

        chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
        status = chip->waitfunc(mtd, chip);

        return status & NAND_STATUS_FAIL ? -EIO : 0;
}

static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
                            uint8_t *buf, int oob_required, int page)
{
        struct denali_nand_info *denali = mtd_to_denali(mtd);
        unsigned long uncor_ecc_flags = 0;
        int stat = 0;
        int ret;

        ret = denali_data_xfer(denali, buf, mtd->writesize, page, 0, 0);
        if (ret && ret != -EBADMSG)
                return ret;

        if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
                stat = denali_hw_ecc_fixup(mtd, denali, &uncor_ecc_flags);
        else if (ret == -EBADMSG)
                stat = denali_sw_ecc_fixup(mtd, denali, &uncor_ecc_flags, buf);

        if (stat < 0)
                return stat;

        if (uncor_ecc_flags) {
                ret = denali_read_oob(mtd, chip, page);
                if (ret)
                        return ret;

                stat = denali_check_erased_page(mtd, chip, buf,
                                                uncor_ecc_flags, stat);
        }

        return stat;
}

static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
                                 const uint8_t *buf, int oob_required, int page)
{
        struct denali_nand_info *denali = mtd_to_denali(mtd);
        int writesize = mtd->writesize;
        int oobsize = mtd->oobsize;
        int ecc_steps = chip->ecc.steps;
        int ecc_size = chip->ecc.size;
        int ecc_bytes = chip->ecc.bytes;
        void *dma_buf = denali->buf;
        int oob_skip = denali->bbtskipbytes;
        size_t size = writesize + oobsize;
        int i, pos, len;

        /*
         * Fill the buffer with 0xff first, except for a full page transfer.
         * This simplifies the logic.
         */
        if (!buf || !oob_required)
                memset(dma_buf, 0xff, size);

        /* Arrange the buffer for syndrome payload/ecc layout */
        if (buf) {
                for (i = 0; i < ecc_steps; i++) {
                        pos = i * (ecc_size + ecc_bytes);
                        len = ecc_size;

                        if (pos >= writesize)
                                pos += oob_skip;
                        else if (pos + len > writesize)
                                len = writesize - pos;

                        memcpy(dma_buf + pos, buf, len);
                        buf += len;
                        if (len < ecc_size) {
                                len = ecc_size - len;
                                memcpy(dma_buf + writesize + oob_skip, buf,
                                       len);
                                buf += len;
                        }
                }
        }

        if (oob_required) {
                const uint8_t *oob = chip->oob_poi;

                /* BBM at the beginning of the OOB area */
                memcpy(dma_buf + writesize, oob, oob_skip);
                oob += oob_skip;

                /* OOB ECC */
                for (i = 0; i < ecc_steps; i++) {
                        pos = ecc_size + i * (ecc_size + ecc_bytes);
                        len = ecc_bytes;

                        if (pos >= writesize)
                                pos += oob_skip;
                        else if (pos + len > writesize)
                                len = writesize - pos;

                        memcpy(dma_buf + pos, oob, len);
                        oob += len;
                        if (len < ecc_bytes) {
                                len = ecc_bytes - len;
                                memcpy(dma_buf + writesize + oob_skip, oob,
                                       len);
                                oob += len;
                        }
                }

                /* OOB free */
                len = oobsize - (oob - chip->oob_poi);
                memcpy(dma_buf + size - len, oob, len);
        }

        return denali_data_xfer(denali, dma_buf, size, page, 1, 1);
}

static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
                             const uint8_t *buf, int oob_required, int page)
{
        struct denali_nand_info *denali = mtd_to_denali(mtd);

        return denali_data_xfer(denali, (void *)buf, mtd->writesize,
                                page, 0, 1);
}

static void denali_select_chip(struct mtd_info *mtd, int chip)
{
        struct denali_nand_info *denali = mtd_to_denali(mtd);

        denali->flash_bank = chip;
}

static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
{
        struct denali_nand_info *denali = mtd_to_denali(mtd);
        uint32_t irq_status;

        /* R/B# pin transitioned from low to high? */
        irq_status = denali_wait_for_irq(denali, INTR__INT_ACT);

        return irq_status & INTR__INT_ACT ? 0 : NAND_STATUS_FAIL;
}

static int denali_erase(struct mtd_info *mtd, int page)
{
        struct denali_nand_info *denali = mtd_to_denali(mtd);
        uint32_t cmd, irq_status;

        denali_reset_irq(denali);

        /* setup the erase operation for this bank and page */
        cmd = MODE_10 | BANK(denali->flash_bank) | page;
        index_addr(denali, cmd, 0x1);

        /* wait for erase to complete or failure to occur */
        irq_status = denali_wait_for_irq(denali,
                                         INTR__ERASE_COMP | INTR__ERASE_FAIL);

        return irq_status & INTR__ERASE_COMP ? 0 : NAND_STATUS_FAIL;
}

#define DIV_ROUND_DOWN_ULL(ll, d) \
        ({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; })

static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
                                       const struct nand_data_interface *conf)
{
        struct denali_nand_info *denali = mtd_to_denali(mtd);
        const struct nand_sdr_timings *timings;
        unsigned long t_clk;
        int acc_clks, re_2_we, re_2_re, we_2_re, addr_2_data;
        int rdwr_en_lo, rdwr_en_hi, rdwr_en_lo_hi, cs_setup;
        int addr_2_data_mask;
        uint32_t tmp;

        timings = nand_get_sdr_timings(conf);
        if (IS_ERR(timings))
                return PTR_ERR(timings);

        /* clk_x period in picoseconds */
        t_clk = DIV_ROUND_DOWN_ULL(1000000000000ULL, denali->clk_x_rate);
        if (!t_clk)
                return -EINVAL;

        if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
                return 0;

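        /*
         * The register fields below are derived by dividing the requested
         * timing by the clk_x period and rounding up.  For example, with
         * clk_x running at 200 MHz, t_clk is 5000 ps, so a tREA_max of
         * 20000 ps would translate to ACC_CLKS = 4.
         */
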
        /* tREA -> ACC_CLKS */
        acc_clks = DIV_ROUND_UP(timings->tREA_max, t_clk);
        acc_clks = min_t(int, acc_clks, ACC_CLKS__VALUE);

        tmp = ioread32(denali->flash_reg + ACC_CLKS);
        tmp &= ~ACC_CLKS__VALUE;
        tmp |= acc_clks;
        iowrite32(tmp, denali->flash_reg + ACC_CLKS);

        /* tRHW -> RE_2_WE */
        re_2_we = DIV_ROUND_UP(timings->tRHW_min, t_clk);
        re_2_we = min_t(int, re_2_we, RE_2_WE__VALUE);

        tmp = ioread32(denali->flash_reg + RE_2_WE);
        tmp &= ~RE_2_WE__VALUE;
        tmp |= re_2_we;
        iowrite32(tmp, denali->flash_reg + RE_2_WE);

        /* tRHZ -> RE_2_RE */
        re_2_re = DIV_ROUND_UP(timings->tRHZ_max, t_clk);
        re_2_re = min_t(int, re_2_re, RE_2_RE__VALUE);

        tmp = ioread32(denali->flash_reg + RE_2_RE);
        tmp &= ~RE_2_RE__VALUE;
        tmp |= re_2_re;
        iowrite32(tmp, denali->flash_reg + RE_2_RE);

        /* tWHR -> WE_2_RE */
        we_2_re = DIV_ROUND_UP(timings->tWHR_min, t_clk);
        we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE);

        tmp = ioread32(denali->flash_reg + TWHR2_AND_WE_2_RE);
        tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE;
        tmp |= we_2_re;
        iowrite32(tmp, denali->flash_reg + TWHR2_AND_WE_2_RE);

        /* tADL -> ADDR_2_DATA */

        /* for older versions, ADDR_2_DATA is only 6 bit wide */
        addr_2_data_mask = TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
        if (denali->revision < 0x0501)
                addr_2_data_mask >>= 1;

        addr_2_data = DIV_ROUND_UP(timings->tADL_min, t_clk);
        addr_2_data = min_t(int, addr_2_data, addr_2_data_mask);

        tmp = ioread32(denali->flash_reg + TCWAW_AND_ADDR_2_DATA);
        tmp &= ~addr_2_data_mask;
        tmp |= addr_2_data;
        iowrite32(tmp, denali->flash_reg + TCWAW_AND_ADDR_2_DATA);

        /* tREH, tWH -> RDWR_EN_HI_CNT */
        rdwr_en_hi = DIV_ROUND_UP(max(timings->tREH_min, timings->tWH_min),
                                  t_clk);
        rdwr_en_hi = min_t(int, rdwr_en_hi, RDWR_EN_HI_CNT__VALUE);

        tmp = ioread32(denali->flash_reg + RDWR_EN_HI_CNT);
        tmp &= ~RDWR_EN_HI_CNT__VALUE;
        tmp |= rdwr_en_hi;
        iowrite32(tmp, denali->flash_reg + RDWR_EN_HI_CNT);

        /* tRP, tWP -> RDWR_EN_LO_CNT */
        rdwr_en_lo = DIV_ROUND_UP(max(timings->tRP_min, timings->tWP_min),
                                  t_clk);
        rdwr_en_lo_hi = DIV_ROUND_UP(max(timings->tRC_min, timings->tWC_min),
                                     t_clk);
        rdwr_en_lo_hi = max(rdwr_en_lo_hi, DENALI_CLK_X_MULT);
        rdwr_en_lo = max(rdwr_en_lo, rdwr_en_lo_hi - rdwr_en_hi);
        rdwr_en_lo = min_t(int, rdwr_en_lo, RDWR_EN_LO_CNT__VALUE);

        tmp = ioread32(denali->flash_reg + RDWR_EN_LO_CNT);
        tmp &= ~RDWR_EN_LO_CNT__VALUE;
        tmp |= rdwr_en_lo;
        iowrite32(tmp, denali->flash_reg + RDWR_EN_LO_CNT);

        /* tCS, tCEA -> CS_SETUP_CNT */
        cs_setup = max3((int)DIV_ROUND_UP(timings->tCS_min, t_clk) - rdwr_en_lo,
                        (int)DIV_ROUND_UP(timings->tCEA_max, t_clk) - acc_clks,
                        0);
        cs_setup = min_t(int, cs_setup, CS_SETUP_CNT__VALUE);

        tmp = ioread32(denali->flash_reg + CS_SETUP_CNT);
        tmp &= ~CS_SETUP_CNT__VALUE;
        tmp |= cs_setup;
        iowrite32(tmp, denali->flash_reg + CS_SETUP_CNT);

        return 0;
}

static void denali_reset_banks(struct denali_nand_info *denali)
{
        u32 irq_status;
        int i;

        for (i = 0; i < denali->max_banks; i++) {
                denali->flash_bank = i;

                denali_reset_irq(denali);

                iowrite32(DEVICE_RESET__BANK(i),
                          denali->flash_reg + DEVICE_RESET);

                irq_status = denali_wait_for_irq(denali,
                        INTR__RST_COMP | INTR__INT_ACT | INTR__TIME_OUT);
                if (!(irq_status & INTR__INT_ACT))
                        break;
        }

        dev_dbg(denali->dev, "%d chips connected\n", i);
        denali->max_banks = i;
}

static void denali_hw_init(struct denali_nand_info *denali)
{
        /*
         * The REVISION register may not be reliable.  Platforms are allowed to
         * override it.
         */
        if (!denali->revision)
                denali->revision =
                                swab16(ioread32(denali->flash_reg + REVISION));

        /*
         * Tell the driver how many bytes the controller will skip at the
         * start of the OOB area before writing the ECC code.  This register
         * may already have been set by firmware, so read the value out; if it
         * is 0, just leave it as is.
         */
        denali->bbtskipbytes = ioread32(denali->flash_reg +
                                        SPARE_AREA_SKIP_BYTES);
        detect_max_banks(denali);
        iowrite32(0x0F, denali->flash_reg + RB_PIN_ENABLED);
        iowrite32(CHIP_EN_DONT_CARE__FLAG,
                        denali->flash_reg + CHIP_ENABLE_DONT_CARE);

        iowrite32(0xffff, denali->flash_reg + SPARE_AREA_MARKER);

        /* these registers should be set to sane values at init time */
        iowrite32(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES);
        iowrite32(1, denali->flash_reg + ECC_ENABLE);
}

int denali_calc_ecc_bytes(int step_size, int strength)
{
        /* BCH code.  Denali requires ecc.bytes to be multiple of 2 */
        return DIV_ROUND_UP(strength * fls(step_size * 8), 16) * 2;
}
EXPORT_SYMBOL(denali_calc_ecc_bytes);

static int denali_ecc_setup(struct mtd_info *mtd, struct nand_chip *chip,
                            struct denali_nand_info *denali)
{
        int oobavail = mtd->oobsize - denali->bbtskipbytes;
        int ret;

        /*
         * If .size and .strength are already set (usually by DT),
         * check if they are supported by this controller.
         */
        if (chip->ecc.size && chip->ecc.strength)
                return nand_check_ecc_caps(chip, denali->ecc_caps, oobavail);

        /*
         * We want .size and .strength closest to the chip's requirement
         * unless NAND_ECC_MAXIMIZE is requested.
         */
        if (!(chip->ecc.options & NAND_ECC_MAXIMIZE)) {
                ret = nand_match_ecc_req(chip, denali->ecc_caps, oobavail);
                if (!ret)
                        return 0;
        }

        /* Max ECC strength is the last thing we can do */
        return nand_maximize_ecc(chip, denali->ecc_caps, oobavail);
}

static int denali_ooblayout_ecc(struct mtd_info *mtd, int section,
                                struct mtd_oob_region *oobregion)
{
        struct denali_nand_info *denali = mtd_to_denali(mtd);
        struct nand_chip *chip = mtd_to_nand(mtd);

        if (section)
                return -ERANGE;

        oobregion->offset = denali->bbtskipbytes;
        oobregion->length = chip->ecc.total;

        return 0;
}

static int denali_ooblayout_free(struct mtd_info *mtd, int section,
                                 struct mtd_oob_region *oobregion)
{
        struct denali_nand_info *denali = mtd_to_denali(mtd);
        struct nand_chip *chip = mtd_to_nand(mtd);

        if (section)
                return -ERANGE;

        oobregion->offset = chip->ecc.total + denali->bbtskipbytes;
        oobregion->length = mtd->oobsize - oobregion->offset;

        return 0;
}

static const struct mtd_ooblayout_ops denali_ooblayout_ops = {
        .ecc = denali_ooblayout_ecc,
        .free = denali_ooblayout_free,
};

static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };

static struct nand_bbt_descr bbt_main_descr = {
        .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
                | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
        .offs = 8,
        .len = 4,
        .veroffs = 12,
        .maxblocks = 4,
        .pattern = bbt_pattern,
};

static struct nand_bbt_descr bbt_mirror_descr = {
        .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
                | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
        .offs = 8,
        .len = 4,
        .veroffs = 12,
        .maxblocks = 4,
        .pattern = mirror_pattern,
};

/* initialize driver data structures */
static void denali_drv_init(struct denali_nand_info *denali)
{
        /*
         * the completion object is used to notify the waiter that the
         * expected interrupt has arrived
         */
        init_completion(&denali->complete);

        /*
         * the spinlock is used to synchronize the ISR with anything that
         * might access the shared data (interrupt status)
         */
        spin_lock_init(&denali->irq_lock);
}

static int denali_multidev_fixup(struct denali_nand_info *denali)
{
        struct nand_chip *chip = &denali->nand;
        struct mtd_info *mtd = nand_to_mtd(chip);

        /*
         * Support for multi device:
         * When the IP configuration is x16 capable and two x8 chips are
         * connected in parallel, DEVICES_CONNECTED should be set to 2.
         * In this case, the core framework knows nothing about this fact,
         * so we should tell it the _logical_ pagesize and anything necessary.
         */
        denali->devnum = ioread32(denali->flash_reg + DEVICES_CONNECTED);

        /*
         * On some SoCs, DEVICES_CONNECTED is not auto-detected.
         * For those, DEVICES_CONNECTED is left to 0.  Set 1 if it is the case.
         */
        if (denali->devnum == 0) {
                denali->devnum = 1;
                iowrite32(1, denali->flash_reg + DEVICES_CONNECTED);
        }

        if (denali->devnum == 1)
                return 0;

        if (denali->devnum != 2) {
                dev_err(denali->dev, "unsupported number of devices %d\n",
                        denali->devnum);
                return -EINVAL;
        }

        /* 2 chips in parallel */
        mtd->size <<= 1;
        mtd->erasesize <<= 1;
        mtd->writesize <<= 1;
        mtd->oobsize <<= 1;
        chip->chipsize <<= 1;
        chip->page_shift += 1;
        chip->phys_erase_shift += 1;
        chip->bbt_erase_shift += 1;
        chip->chip_shift += 1;
        chip->pagemask <<= 1;
        chip->ecc.size <<= 1;
        chip->ecc.bytes <<= 1;
        chip->ecc.strength <<= 1;
        denali->bbtskipbytes <<= 1;

        return 0;
}

int denali_init(struct denali_nand_info *denali)
{
        struct nand_chip *chip = &denali->nand;
        struct mtd_info *mtd = nand_to_mtd(chip);
        int ret;

        mtd->dev.parent = denali->dev;
        denali_hw_init(denali);
        denali_drv_init(denali);

        denali_clear_irq_all(denali);

        /* Request IRQ after all the hardware initialization is finished */
        ret = devm_request_irq(denali->dev, denali->irq, denali_isr,
                               IRQF_SHARED, DENALI_NAND_NAME, denali);
        if (ret) {
                dev_err(denali->dev, "Unable to request IRQ\n");
                return ret;
        }

        denali_enable_irq(denali);
        denali_reset_banks(denali);

        denali->flash_bank = CHIP_SELECT_INVALID;

        nand_set_flash_node(chip, denali->dev->of_node);
        /* Fallback to the default name if DT did not give "label" property */
        if (!mtd->name)
                mtd->name = "denali-nand";

        /* register the driver with the NAND core subsystem */
        chip->select_chip = denali_select_chip;
        chip->read_byte = denali_read_byte;
        chip->write_byte = denali_write_byte;
        chip->read_word = denali_read_word;
        chip->cmd_ctrl = denali_cmd_ctrl;
        chip->dev_ready = denali_dev_ready;
        chip->waitfunc = denali_waitfunc;

        /* clk rate info is needed for setup_data_interface */
        if (denali->clk_x_rate)
                chip->setup_data_interface = denali_setup_data_interface;

        /*
         * Scan for NAND devices attached to the controller.  This is the
         * first stage in a two-step process to register with the NAND
         * subsystem.
         */
        ret = nand_scan_ident(mtd, denali->max_banks, NULL);
        if (ret)
                goto disable_irq;

        if (ioread32(denali->flash_reg + FEATURES) & FEATURES__DMA)
                denali->dma_avail = 1;

        if (denali->dma_avail) {
                int dma_bit = denali->caps & DENALI_CAP_DMA_64BIT ? 64 : 32;

                ret = dma_set_mask(denali->dev, DMA_BIT_MASK(dma_bit));
                if (ret) {
                        dev_info(denali->dev,
                                 "Failed to set DMA mask. Disabling DMA.\n");
                        denali->dma_avail = 0;
                }
        }

        if (denali->dma_avail) {
                chip->options |= NAND_USE_BOUNCE_BUFFER;
                chip->buf_align = 16;
        }

        /*
         * Second stage of the NAND scan.  This stage requires information
         * regarding ECC and bad block management.
         */

        /* Bad block management */
        chip->bbt_td = &bbt_main_descr;
        chip->bbt_md = &bbt_mirror_descr;

        /* skip the scan for now until we have OOB read and write support */
        chip->bbt_options |= NAND_BBT_USE_FLASH;
        chip->options |= NAND_SKIP_BBTSCAN;
        chip->ecc.mode = NAND_ECC_HW_SYNDROME;

        /* no subpage writes on denali */
        chip->options |= NAND_NO_SUBPAGE_WRITE;

        ret = denali_ecc_setup(mtd, chip, denali);
        if (ret) {
                dev_err(denali->dev, "Failed to setup ECC settings.\n");
                goto disable_irq;
        }

        dev_dbg(denali->dev,
                "chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
                chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);

        iowrite32(MAKE_ECC_CORRECTION(chip->ecc.strength, 1),
                  denali->flash_reg + ECC_CORRECTION);
        iowrite32(mtd->erasesize / mtd->writesize,
                  denali->flash_reg + PAGES_PER_BLOCK);
        iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0,
                  denali->flash_reg + DEVICE_WIDTH);
        iowrite32(mtd->writesize, denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
        iowrite32(mtd->oobsize, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);

        iowrite32(chip->ecc.size, denali->flash_reg + CFG_DATA_BLOCK_SIZE);
        iowrite32(chip->ecc.size, denali->flash_reg + CFG_LAST_DATA_BLOCK_SIZE);
        /* chip->ecc.steps is set by nand_scan_tail(); not available here */
        iowrite32(mtd->writesize / chip->ecc.size,
                  denali->flash_reg + CFG_NUM_DATA_BLOCKS);

        mtd_set_ooblayout(mtd, &denali_ooblayout_ops);

        if (chip->options & NAND_BUSWIDTH_16) {
                chip->read_buf = denali_read_buf16;
                chip->write_buf = denali_write_buf16;
        } else {
                chip->read_buf = denali_read_buf;
                chip->write_buf = denali_write_buf;
        }
        chip->ecc.options |= NAND_ECC_CUSTOM_PAGE_ACCESS;
        chip->ecc.read_page = denali_read_page;
        chip->ecc.read_page_raw = denali_read_page_raw;
        chip->ecc.write_page = denali_write_page;
        chip->ecc.write_page_raw = denali_write_page_raw;
        chip->ecc.read_oob = denali_read_oob;
        chip->ecc.write_oob = denali_write_oob;
        chip->erase = denali_erase;

        ret = denali_multidev_fixup(denali);
        if (ret)
                goto disable_irq;

        /*
         * This buffer is DMA-mapped by denali_{read,write}_page_raw.  Do not
         * use devm_kmalloc() because the memory allocated by devm_ does not
         * guarantee DMA-safe alignment.
         */
        denali->buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
        if (!denali->buf) {
                ret = -ENOMEM;
                goto disable_irq;
        }

        ret = nand_scan_tail(mtd);
        if (ret)
                goto free_buf;

        ret = mtd_device_register(mtd, NULL, 0);
        if (ret) {
                dev_err(denali->dev, "Failed to register MTD: %d\n", ret);
                goto free_buf;
        }
        return 0;

free_buf:
        kfree(denali->buf);
disable_irq:
        denali_disable_irq(denali);

        return ret;
}
EXPORT_SYMBOL(denali_init);

/* driver exit point */
void denali_remove(struct denali_nand_info *denali)
{
        struct mtd_info *mtd = nand_to_mtd(&denali->nand);

        nand_release(mtd);
        kfree(denali->buf);
        denali_disable_irq(denali);
}
EXPORT_SYMBOL(denali_remove);