1/*
2 * Copyright (c) 2016, The Linux Foundation. All rights reserved.
3 *
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/clk.h>
15#include <linux/slab.h>
16#include <linux/bitops.h>
17#include <linux/dma-mapping.h>
18#include <linux/dmaengine.h>
19#include <linux/module.h>
20#include <linux/mtd/nand.h>
21#include <linux/mtd/partitions.h>
22#include <linux/of.h>
23#include <linux/of_device.h>
24#include <linux/delay.h>
25
26/* NANDc reg offsets */
27#define NAND_FLASH_CMD 0x00
28#define NAND_ADDR0 0x04
29#define NAND_ADDR1 0x08
30#define NAND_FLASH_CHIP_SELECT 0x0c
31#define NAND_EXEC_CMD 0x10
32#define NAND_FLASH_STATUS 0x14
33#define NAND_BUFFER_STATUS 0x18
34#define NAND_DEV0_CFG0 0x20
35#define NAND_DEV0_CFG1 0x24
36#define NAND_DEV0_ECC_CFG 0x28
37#define NAND_DEV1_ECC_CFG 0x2c
38#define NAND_DEV1_CFG0 0x30
39#define NAND_DEV1_CFG1 0x34
40#define NAND_READ_ID 0x40
41#define NAND_READ_STATUS 0x44
42#define NAND_DEV_CMD0 0xa0
43#define NAND_DEV_CMD1 0xa4
44#define NAND_DEV_CMD2 0xa8
45#define NAND_DEV_CMD_VLD 0xac
46#define SFLASHC_BURST_CFG 0xe0
47#define NAND_ERASED_CW_DETECT_CFG 0xe8
48#define NAND_ERASED_CW_DETECT_STATUS 0xec
49#define NAND_EBI2_ECC_BUF_CFG 0xf0
50#define FLASH_BUF_ACC 0x100
51
52#define NAND_CTRL 0xf00
53#define NAND_VERSION 0xf08
54#define NAND_READ_LOCATION_0 0xf20
55#define NAND_READ_LOCATION_1 0xf24
56
57/* dummy register offsets, used by write_reg_dma */
58#define NAND_DEV_CMD1_RESTORE 0xdead
59#define NAND_DEV_CMD_VLD_RESTORE 0xbeef
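/*
 * How the dummy offsets are used (see write_reg_dma() and nandc_param()
 * below): offset_to_nandc_reg() maps them to regs->orig_cmd1/orig_vld, and
 * write_reg_dma() then rewrites the target offset back to the real
 * NAND_DEV_CMD1/NAND_DEV_CMD_VLD registers, so the original values can be
 * restored after an ONFI parameter page read.
 */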
60
61/* NAND_FLASH_CMD bits */
62#define PAGE_ACC BIT(4)
63#define LAST_PAGE BIT(5)
64
65/* NAND_FLASH_CHIP_SELECT bits */
66#define NAND_DEV_SEL 0
67#define DM_EN BIT(2)
68
69/* NAND_FLASH_STATUS bits */
70#define FS_OP_ERR BIT(4)
71#define FS_READY_BSY_N BIT(5)
72#define FS_MPU_ERR BIT(8)
73#define FS_DEVICE_STS_ERR BIT(16)
74#define FS_DEVICE_WP BIT(23)
75
76/* NAND_BUFFER_STATUS bits */
77#define BS_UNCORRECTABLE_BIT BIT(8)
78#define BS_CORRECTABLE_ERR_MSK 0x1f
79
80/* NAND_DEVn_CFG0 bits */
81#define DISABLE_STATUS_AFTER_WRITE 4
82#define CW_PER_PAGE 6
83#define UD_SIZE_BYTES 9
84#define ECC_PARITY_SIZE_BYTES_RS 19
85#define SPARE_SIZE_BYTES 23
86#define NUM_ADDR_CYCLES 27
87#define STATUS_BFR_READ 30
88#define SET_RD_MODE_AFTER_STATUS 31
89
90/* NAND_DEVn_CFG1 bits */
91#define DEV0_CFG1_ECC_DISABLE 0
92#define WIDE_FLASH 1
93#define NAND_RECOVERY_CYCLES 2
94#define CS_ACTIVE_BSY 5
95#define BAD_BLOCK_BYTE_NUM 6
96#define BAD_BLOCK_IN_SPARE_AREA 16
97#define WR_RD_BSY_GAP 17
98#define ENABLE_BCH_ECC 27
99
100/* NAND_DEV0_ECC_CFG bits */
101#define ECC_CFG_ECC_DISABLE 0
102#define ECC_SW_RESET 1
103#define ECC_MODE 4
104#define ECC_PARITY_SIZE_BYTES_BCH 8
105#define ECC_NUM_DATA_BYTES 16
106#define ECC_FORCE_CLK_OPEN 30
107
108/* NAND_DEV_CMD1 bits */
109#define READ_ADDR 0
110
111/* NAND_DEV_CMD_VLD bits */
112#define READ_START_VLD			BIT(0)
113#define READ_STOP_VLD BIT(1)
114#define WRITE_START_VLD BIT(2)
115#define ERASE_START_VLD BIT(3)
116#define SEQ_READ_START_VLD BIT(4)
117
118/* NAND_EBI2_ECC_BUF_CFG bits */
119#define NUM_STEPS 0
120
121/* NAND_ERASED_CW_DETECT_CFG bits */
122#define ERASED_CW_ECC_MASK 1
123#define AUTO_DETECT_RES 0
124#define MASK_ECC (1 << ERASED_CW_ECC_MASK)
125#define RESET_ERASED_DET (1 << AUTO_DETECT_RES)
126#define ACTIVE_ERASED_DET (0 << AUTO_DETECT_RES)
127#define CLR_ERASED_PAGE_DET (RESET_ERASED_DET | MASK_ECC)
128#define SET_ERASED_PAGE_DET (ACTIVE_ERASED_DET | MASK_ECC)
129
130/* NAND_ERASED_CW_DETECT_STATUS bits */
131#define PAGE_ALL_ERASED BIT(7)
132#define CODEWORD_ALL_ERASED BIT(6)
133#define PAGE_ERASED BIT(5)
134#define CODEWORD_ERASED BIT(4)
135#define ERASED_PAGE (PAGE_ALL_ERASED | PAGE_ERASED)
136#define ERASED_CW (CODEWORD_ALL_ERASED | CODEWORD_ERASED)
137
138/* Version Mask */
139#define NAND_VERSION_MAJOR_MASK 0xf0000000
140#define NAND_VERSION_MAJOR_SHIFT 28
141#define NAND_VERSION_MINOR_MASK 0x0fff0000
142#define NAND_VERSION_MINOR_SHIFT 16
143
144/* NAND OP_CMDs */
145#define PAGE_READ 0x2
146#define PAGE_READ_WITH_ECC 0x3
147#define PAGE_READ_WITH_ECC_SPARE 0x4
148#define PROGRAM_PAGE 0x6
149#define PAGE_PROGRAM_WITH_ECC 0x7
150#define PROGRAM_PAGE_SPARE 0x9
151#define BLOCK_ERASE 0xa
152#define FETCH_ID 0xb
153#define RESET_DEVICE 0xd
154
155/* Default Value for NAND_DEV_CMD_VLD */
156#define NAND_DEV_CMD_VLD_VAL (READ_START_VLD | WRITE_START_VLD | \
157 ERASE_START_VLD | SEQ_READ_START_VLD)
158
159/*
160 * the NAND controller performs reads/writes with ECC in 516 byte chunks.
161 * the driver calls the chunks 'step' or 'codeword' interchangeably
162 */
163#define NANDC_STEP_SIZE 512
164
165/*
166 * the largest page size we support is 8K, this will have 16 steps/codewords
167 * of 512 bytes each
168 */
169#define MAX_NUM_STEPS (SZ_8K / NANDC_STEP_SIZE)
170
171/* we read at most 3 registers per codeword scan */
172#define MAX_REG_RD (3 * MAX_NUM_STEPS)
173
174/* ECC modes supported by the controller */
175#define ECC_NONE BIT(0)
176#define ECC_RS_4BIT BIT(1)
177#define ECC_BCH_4BIT BIT(2)
178#define ECC_BCH_8BIT BIT(3)
179
180struct desc_info {
181 struct list_head node;
182
183 enum dma_data_direction dir;
184 struct scatterlist sgl;
185 struct dma_async_tx_descriptor *dma_desc;
186};
187
188/*
189 * holds the current register values that we want to write. acts as a contiguous
190 * chunk of memory which we use to write the controller registers through DMA.
191 */
192struct nandc_regs {
193 __le32 cmd;
194 __le32 addr0;
195 __le32 addr1;
196 __le32 chip_sel;
197 __le32 exec;
198
199 __le32 cfg0;
200 __le32 cfg1;
201 __le32 ecc_bch_cfg;
202
203 __le32 clrflashstatus;
204 __le32 clrreadstatus;
205
206 __le32 cmd1;
207 __le32 vld;
208
209 __le32 orig_cmd1;
210 __le32 orig_vld;
211
212 __le32 ecc_buf_cfg;
213};
214
215/*
216 * NAND controller data struct
217 *
218 * @controller: base controller structure
219 * @host_list: list containing all the chips attached to the
220 * controller
221 * @dev: parent device
222 * @base: MMIO base
223 * @base_dma: physical base address of controller registers
224 * @core_clk: controller clock
225 * @aon_clk: another controller clock
226 *
227 * @chan: dma channel
228 * @cmd_crci: ADM DMA CRCI for command flow control
229 * @data_crci: ADM DMA CRCI for data flow control
230 * @desc_list: DMA descriptor list (list of desc_infos)
231 *
232 * @data_buffer: our local DMA buffer for page read/writes,
233 * used when we can't use the buffer provided
234 * by upper layers directly
235 * @buf_size/count/start: markers for chip->read_buf/write_buf functions
236 * @reg_read_buf: local buffer for reading back registers via DMA
237 * @reg_read_pos: marker for data read in reg_read_buf
238 *
239 * @regs: a contiguous chunk of memory for DMA register
240 * writes. contains the register values to be
241 * written to controller
242 * @cmd1/vld: some fixed controller register values
243 * @ecc_modes: supported ECC modes by the current controller,
244 * initialized via DT match data
245 */
246struct qcom_nand_controller {
247 struct nand_hw_control controller;
248 struct list_head host_list;
249
250 struct device *dev;
251
252 void __iomem *base;
253 dma_addr_t base_dma;
254
255 struct clk *core_clk;
256 struct clk *aon_clk;
257
258 struct dma_chan *chan;
259 unsigned int cmd_crci;
260 unsigned int data_crci;
261 struct list_head desc_list;
262
263 u8 *data_buffer;
264 int buf_size;
265 int buf_count;
266 int buf_start;
267
268 __le32 *reg_read_buf;
269 int reg_read_pos;
270
271 struct nandc_regs *regs;
272
273 u32 cmd1, vld;
274 u32 ecc_modes;
275};
276
277/*
278 * NAND chip structure
279 *
280 * @chip: base NAND chip structure
281 * @node: list node to add itself to host_list in
282 * qcom_nand_controller
283 *
284 * @cs: chip select value for this chip
285 * @cw_size: the number of bytes in a single step/codeword
286 * of a page, consisting of all data, ecc, spare
287 * and reserved bytes
288 * @cw_data: the number of bytes within a codeword protected
289 * by ECC
290 * @use_ecc: request the controller to use ECC for the
291 * upcoming read/write
292 * @bch_enabled: flag to tell whether BCH ECC mode is used
293 * @ecc_bytes_hw: ECC bytes used by controller hardware for this
294 * chip
295 * @status: value to be returned if NAND_CMD_STATUS command
296 * is executed
297 * @last_command: keeps track of last command on this chip. used
298 * for reading correct status
299 *
300 * @cfg0, cfg1, cfg0_raw..: NANDc register configurations needed for
301 * ecc/non-ecc mode for the current nand flash
302 * device
303 */
304struct qcom_nand_host {
305 struct nand_chip chip;
306 struct list_head node;
307
308 int cs;
309 int cw_size;
310 int cw_data;
311 bool use_ecc;
312 bool bch_enabled;
313 int ecc_bytes_hw;
314 int spare_bytes;
315 int bbm_size;
316 u8 status;
317 int last_command;
318
319 u32 cfg0, cfg1;
320 u32 cfg0_raw, cfg1_raw;
321 u32 ecc_buf_cfg;
322 u32 ecc_bch_cfg;
323 u32 clrflashstatus;
324 u32 clrreadstatus;
325};
326
327static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
328{
329 return container_of(chip, struct qcom_nand_host, chip);
330}
331
332static inline struct qcom_nand_controller *
333get_qcom_nand_controller(struct nand_chip *chip)
334{
335 return container_of(chip->controller, struct qcom_nand_controller,
336 controller);
337}
338
339static inline u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
340{
341 return ioread32(nandc->base + offset);
342}
343
344static inline void nandc_write(struct qcom_nand_controller *nandc, int offset,
345 u32 val)
346{
347 iowrite32(val, nandc->base + offset);
348}
349
350static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset)
351{
352 switch (offset) {
353 case NAND_FLASH_CMD:
354 return &regs->cmd;
355 case NAND_ADDR0:
356 return &regs->addr0;
357 case NAND_ADDR1:
358 return &regs->addr1;
359 case NAND_FLASH_CHIP_SELECT:
360 return &regs->chip_sel;
361 case NAND_EXEC_CMD:
362 return &regs->exec;
363 case NAND_FLASH_STATUS:
364 return &regs->clrflashstatus;
365 case NAND_DEV0_CFG0:
366 return &regs->cfg0;
367 case NAND_DEV0_CFG1:
368 return &regs->cfg1;
369 case NAND_DEV0_ECC_CFG:
370 return &regs->ecc_bch_cfg;
371 case NAND_READ_STATUS:
372 return &regs->clrreadstatus;
373 case NAND_DEV_CMD1:
374 return &regs->cmd1;
375 case NAND_DEV_CMD1_RESTORE:
376 return &regs->orig_cmd1;
377 case NAND_DEV_CMD_VLD:
378 return &regs->vld;
379 case NAND_DEV_CMD_VLD_RESTORE:
380 return &regs->orig_vld;
381 case NAND_EBI2_ECC_BUF_CFG:
382 return &regs->ecc_buf_cfg;
383 default:
384 return NULL;
385 }
386}
387
388static void nandc_set_reg(struct qcom_nand_controller *nandc, int offset,
389 u32 val)
390{
391 struct nandc_regs *regs = nandc->regs;
392 __le32 *reg;
393
394 reg = offset_to_nandc_reg(regs, offset);
395
396 if (reg)
397 *reg = cpu_to_le32(val);
398}
399
400/* helper to configure address register values */
401static void set_address(struct qcom_nand_host *host, u16 column, int page)
402{
403 struct nand_chip *chip = &host->chip;
404 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
405
406 if (chip->options & NAND_BUSWIDTH_16)
407 column >>= 1;
408
409 nandc_set_reg(nandc, NAND_ADDR0, page << 16 | column);
410 nandc_set_reg(nandc, NAND_ADDR1, page >> 16 & 0xff);
411}
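/*
 * Worked example (illustrative): for page 0x12345 and column 0x10 on an
 * 8-bit bus, NAND_ADDR0 = (0x12345 << 16) | 0x10 = 0x23450010 and
 * NAND_ADDR1 = (0x12345 >> 16) & 0xff = 0x01. On a 16-bit bus the column
 * is halved first, presumably because it is expressed in 16-bit words.
 */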
412
413/*
414 * update_rw_regs: set up read/write register values, these will be
415 * written to the NAND controller registers via DMA
416 *
417 * @num_cw: number of steps for the read/write operation
418 * @read: read or write operation
419 */
420static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read)
421{
422 struct nand_chip *chip = &host->chip;
423 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
424 u32 cmd, cfg0, cfg1, ecc_bch_cfg;
425
426 if (read) {
427 if (host->use_ecc)
428 cmd = PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
429 else
430 cmd = PAGE_READ | PAGE_ACC | LAST_PAGE;
431 } else {
432 cmd = PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
433 }
434
435 if (host->use_ecc) {
436 cfg0 = (host->cfg0 & ~(7U << CW_PER_PAGE)) |
437 (num_cw - 1) << CW_PER_PAGE;
438
439 cfg1 = host->cfg1;
440 ecc_bch_cfg = host->ecc_bch_cfg;
441 } else {
442 cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
443 (num_cw - 1) << CW_PER_PAGE;
444
445 cfg1 = host->cfg1_raw;
446 ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
447 }
448
449 nandc_set_reg(nandc, NAND_FLASH_CMD, cmd);
450 nandc_set_reg(nandc, NAND_DEV0_CFG0, cfg0);
451 nandc_set_reg(nandc, NAND_DEV0_CFG1, cfg1);
452 nandc_set_reg(nandc, NAND_DEV0_ECC_CFG, ecc_bch_cfg);
453 nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg);
454 nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
455 nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);
456 nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
457}
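/*
 * For example, a full-page ECC read of a 2K page programs CW_PER_PAGE with
 * num_cw - 1 = 3, while single-codeword operations such as copy_last_cw()
 * pass num_cw = 1 so the field is programmed with 0.
 */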
458
459static int prep_dma_desc(struct qcom_nand_controller *nandc, bool read,
460 int reg_off, const void *vaddr, int size,
461 bool flow_control)
462{
463 struct desc_info *desc;
464 struct dma_async_tx_descriptor *dma_desc;
465 struct scatterlist *sgl;
466 struct dma_slave_config slave_conf;
467 enum dma_transfer_direction dir_eng;
468 int ret;
469
470 desc = kzalloc(sizeof(*desc), GFP_KERNEL);
471 if (!desc)
472 return -ENOMEM;
473
474 sgl = &desc->sgl;
475
476 sg_init_one(sgl, vaddr, size);
477
478 if (read) {
479 dir_eng = DMA_DEV_TO_MEM;
480 desc->dir = DMA_FROM_DEVICE;
481 } else {
482 dir_eng = DMA_MEM_TO_DEV;
483 desc->dir = DMA_TO_DEVICE;
484 }
485
486 ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
487 if (ret == 0) {
488 ret = -ENOMEM;
489 goto err;
490 }
491
492 memset(&slave_conf, 0x00, sizeof(slave_conf));
493
494 slave_conf.device_fc = flow_control;
495 if (read) {
496 slave_conf.src_maxburst = 16;
497 slave_conf.src_addr = nandc->base_dma + reg_off;
498 slave_conf.slave_id = nandc->data_crci;
499 } else {
500 slave_conf.dst_maxburst = 16;
501 slave_conf.dst_addr = nandc->base_dma + reg_off;
502 slave_conf.slave_id = nandc->cmd_crci;
503 }
504
505 ret = dmaengine_slave_config(nandc->chan, &slave_conf);
506 if (ret) {
507 dev_err(nandc->dev, "failed to configure dma channel\n");
508 goto err;
509 }
510
511 dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
512 if (!dma_desc) {
513 dev_err(nandc->dev, "failed to prepare desc\n");
514 ret = -EINVAL;
515 goto err;
516 }
517
518 desc->dma_desc = dma_desc;
519
520 list_add_tail(&desc->node, &nandc->desc_list);
521
522 return 0;
523err:
524 kfree(desc);
525
526 return ret;
527}
528
529/*
530 * read_reg_dma: prepares a descriptor to read a given number of
531 * contiguous registers to the reg_read_buf pointer
532 *
533 * @first: offset of the first register in the contiguous block
534 * @num_regs: number of registers to read
535 */
536static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
537 int num_regs)
538{
539 bool flow_control = false;
540 void *vaddr;
541 int size;
542
543 if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
544 flow_control = true;
545
546 size = num_regs * sizeof(u32);
547 vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
548 nandc->reg_read_pos += num_regs;
549
550 return prep_dma_desc(nandc, true, first, vaddr, size, flow_control);
551}
552
553/*
554 * write_reg_dma: prepares a descriptor to write a given number of
555 * contiguous registers
556 *
557 * @first: offset of the first register in the contiguous block
558 * @num_regs: number of registers to write
559 */
560static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
561 int num_regs)
562{
563 bool flow_control = false;
564 struct nandc_regs *regs = nandc->regs;
565 void *vaddr;
566 int size;
567
568 vaddr = offset_to_nandc_reg(regs, first);
569
570 if (first == NAND_FLASH_CMD)
571 flow_control = true;
572
573 if (first == NAND_DEV_CMD1_RESTORE)
574 first = NAND_DEV_CMD1;
575
576 if (first == NAND_DEV_CMD_VLD_RESTORE)
577 first = NAND_DEV_CMD_VLD;
578
579 size = num_regs * sizeof(u32);
580
581 return prep_dma_desc(nandc, false, first, vaddr, size, flow_control);
582}
583
584/*
585 * read_data_dma: prepares a DMA descriptor to transfer data from the
586 * controller's internal buffer to the buffer 'vaddr'
587 *
588 * @reg_off: offset within the controller's data buffer
589 * @vaddr: virtual address of the buffer we want to write to
590 * @size: DMA transaction size in bytes
591 */
592static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
593 const u8 *vaddr, int size)
594{
595 return prep_dma_desc(nandc, true, reg_off, vaddr, size, false);
596}
597
598/*
599 * write_data_dma: prepares a DMA descriptor to transfer data from
600 * 'vaddr' to the controller's internal buffer
601 *
602 * @reg_off: offset within the controller's data buffer
603 * @vaddr: virtual address of the buffer we want to read from
604 * @size: DMA transaction size in bytes
605 */
606static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
607 const u8 *vaddr, int size)
608{
609 return prep_dma_desc(nandc, false, reg_off, vaddr, size, false);
610}
611
612/*
613 * Helper to prepare DMA descriptors for configuring registers
614 * before reading a NAND page.
615 */
616static void config_nand_page_read(struct qcom_nand_controller *nandc)
617{
618	write_reg_dma(nandc, NAND_ADDR0, 2);
619	write_reg_dma(nandc, NAND_DEV0_CFG0, 3);
620	write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1);
621}
622
623/*
624 * Helper to prepare DMA descriptors for configuring registers
625 * before reading each codeword in a NAND page.
626 */
627static void config_nand_cw_read(struct qcom_nand_controller *nandc)
628{
629	write_reg_dma(nandc, NAND_FLASH_CMD, 1);
630	write_reg_dma(nandc, NAND_EXEC_CMD, 1);
631
632 read_reg_dma(nandc, NAND_FLASH_STATUS, 2);
633 read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1);
634}
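/*
 * The two register reads above land in reg_read_buf as three contiguous
 * words per codeword (NAND_FLASH_STATUS, NAND_BUFFER_STATUS, then
 * NAND_ERASED_CW_DETECT_STATUS), which is what parse_read_errors()
 * interprets via struct read_stats.
 */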
635
636/*
637 * Helper to prepare dma descriptors to configure registers needed for reading a
638 * single codeword in a page
639 */
640static void config_nand_single_cw_page_read(struct qcom_nand_controller *nandc)
641{
642	config_nand_page_read(nandc);
643	config_nand_cw_read(nandc);
644}
645
646/*
647 * Helper to prepare DMA descriptors used to configure registers needed
648 * before writing a NAND page.
649 */
650static void config_nand_page_write(struct qcom_nand_controller *nandc)
651{
652	write_reg_dma(nandc, NAND_ADDR0, 2);
653	write_reg_dma(nandc, NAND_DEV0_CFG0, 3);
654	write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1);
655}
656
657/*
658 * Helper to prepare DMA descriptors for configuring registers
659 * before writing each codeword in a NAND page.
660 */
661static void config_nand_cw_write(struct qcom_nand_controller *nandc)
662{
663	write_reg_dma(nandc, NAND_FLASH_CMD, 1);
664	write_reg_dma(nandc, NAND_EXEC_CMD, 1);
665
666 read_reg_dma(nandc, NAND_FLASH_STATUS, 1);
667
668 write_reg_dma(nandc, NAND_FLASH_STATUS, 1);
669 write_reg_dma(nandc, NAND_READ_STATUS, 1);
670}
671
672/*
673 * the following functions are used within chip->cmdfunc() to perform different
674 * NAND_CMD_* commands
675 */
676
677/* sets up descriptors for NAND_CMD_PARAM */
678static int nandc_param(struct qcom_nand_host *host)
679{
680 struct nand_chip *chip = &host->chip;
681 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
682
683 /*
684 * NAND_CMD_PARAM is called before we know much about the FLASH chip
685 * in use. we configure the controller to perform a raw read of 512
686 * bytes to read onfi params
687 */
688 nandc_set_reg(nandc, NAND_FLASH_CMD, PAGE_READ | PAGE_ACC | LAST_PAGE);
689 nandc_set_reg(nandc, NAND_ADDR0, 0);
690 nandc_set_reg(nandc, NAND_ADDR1, 0);
691 nandc_set_reg(nandc, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
692 | 512 << UD_SIZE_BYTES
693 | 5 << NUM_ADDR_CYCLES
694 | 0 << SPARE_SIZE_BYTES);
695 nandc_set_reg(nandc, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES
696 | 0 << CS_ACTIVE_BSY
697 | 17 << BAD_BLOCK_BYTE_NUM
698 | 1 << BAD_BLOCK_IN_SPARE_AREA
699 | 2 << WR_RD_BSY_GAP
700 | 0 << WIDE_FLASH
701 | 1 << DEV0_CFG1_ECC_DISABLE);
702 nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);
703
704 /* configure CMD1 and VLD for ONFI param probing */
705 nandc_set_reg(nandc, NAND_DEV_CMD_VLD,
706		      (nandc->vld & ~READ_START_VLD));
707	nandc_set_reg(nandc, NAND_DEV_CMD1,
708 (nandc->cmd1 & ~(0xFF << READ_ADDR))
709 | NAND_CMD_PARAM << READ_ADDR);
710
711 nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
712
713 nandc_set_reg(nandc, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
714 nandc_set_reg(nandc, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);
715
716 write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1);
717 write_reg_dma(nandc, NAND_DEV_CMD1, 1);
718
719 nandc->buf_count = 512;
720 memset(nandc->data_buffer, 0xff, nandc->buf_count);
721
722	config_nand_single_cw_page_read(nandc);
723
724 read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
725 nandc->buf_count);
726
727 /* restore CMD1 and VLD regs */
728 write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1);
729 write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1);
730
731 return 0;
732}
733
734/* sets up descriptors for NAND_CMD_ERASE1 */
735static int erase_block(struct qcom_nand_host *host, int page_addr)
736{
737 struct nand_chip *chip = &host->chip;
738 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
739
740 nandc_set_reg(nandc, NAND_FLASH_CMD,
741 BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
742 nandc_set_reg(nandc, NAND_ADDR0, page_addr);
743 nandc_set_reg(nandc, NAND_ADDR1, 0);
744 nandc_set_reg(nandc, NAND_DEV0_CFG0,
745 host->cfg0_raw & ~(7 << CW_PER_PAGE));
746 nandc_set_reg(nandc, NAND_DEV0_CFG1, host->cfg1_raw);
747 nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
748 nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
749 nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);
750
751 write_reg_dma(nandc, NAND_FLASH_CMD, 3);
752 write_reg_dma(nandc, NAND_DEV0_CFG0, 2);
753 write_reg_dma(nandc, NAND_EXEC_CMD, 1);
754
755 read_reg_dma(nandc, NAND_FLASH_STATUS, 1);
756
757 write_reg_dma(nandc, NAND_FLASH_STATUS, 1);
758 write_reg_dma(nandc, NAND_READ_STATUS, 1);
759
760 return 0;
761}
762
763/* sets up descriptors for NAND_CMD_READID */
764static int read_id(struct qcom_nand_host *host, int column)
765{
766 struct nand_chip *chip = &host->chip;
767 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
768
769 if (column == -1)
770 return 0;
771
772 nandc_set_reg(nandc, NAND_FLASH_CMD, FETCH_ID);
773 nandc_set_reg(nandc, NAND_ADDR0, column);
774 nandc_set_reg(nandc, NAND_ADDR1, 0);
775 nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
776 nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
777
778 write_reg_dma(nandc, NAND_FLASH_CMD, 4);
779 write_reg_dma(nandc, NAND_EXEC_CMD, 1);
780
781 read_reg_dma(nandc, NAND_READ_ID, 1);
782
783 return 0;
784}
785
786/* sets up descriptors for NAND_CMD_RESET */
787static int reset(struct qcom_nand_host *host)
788{
789 struct nand_chip *chip = &host->chip;
790 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
791
792 nandc_set_reg(nandc, NAND_FLASH_CMD, RESET_DEVICE);
793 nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
794
795 write_reg_dma(nandc, NAND_FLASH_CMD, 1);
796 write_reg_dma(nandc, NAND_EXEC_CMD, 1);
797
798 read_reg_dma(nandc, NAND_FLASH_STATUS, 1);
799
800 return 0;
801}
802
803/* helpers to submit/free our list of dma descriptors */
804static int submit_descs(struct qcom_nand_controller *nandc)
805{
806 struct desc_info *desc;
807 dma_cookie_t cookie = 0;
808
809 list_for_each_entry(desc, &nandc->desc_list, node)
810 cookie = dmaengine_submit(desc->dma_desc);
811
812 if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
813 return -ETIMEDOUT;
814
815 return 0;
816}
817
818static void free_descs(struct qcom_nand_controller *nandc)
819{
820 struct desc_info *desc, *n;
821
822 list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
823 list_del(&desc->node);
824 dma_unmap_sg(nandc->dev, &desc->sgl, 1, desc->dir);
825 kfree(desc);
826 }
827}
828
829/* reset the register read buffer for next NAND operation */
830static void clear_read_regs(struct qcom_nand_controller *nandc)
831{
832 nandc->reg_read_pos = 0;
833}
834
835static void pre_command(struct qcom_nand_host *host, int command)
836{
837 struct nand_chip *chip = &host->chip;
838 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
839
840 nandc->buf_count = 0;
841 nandc->buf_start = 0;
842 host->use_ecc = false;
843 host->last_command = command;
844
845 clear_read_regs(nandc);
846}
847
848/*
849 * this is called after NAND_CMD_PAGEPROG and NAND_CMD_ERASE1 to set our
850 * privately maintained status byte, this status byte can be read after
851 * NAND_CMD_STATUS is called
852 */
853static void parse_erase_write_errors(struct qcom_nand_host *host, int command)
854{
855 struct nand_chip *chip = &host->chip;
856 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
857 struct nand_ecc_ctrl *ecc = &chip->ecc;
858 int num_cw;
859 int i;
860
861 num_cw = command == NAND_CMD_PAGEPROG ? ecc->steps : 1;
862
863 for (i = 0; i < num_cw; i++) {
864 u32 flash_status = le32_to_cpu(nandc->reg_read_buf[i]);
865
866 if (flash_status & FS_MPU_ERR)
867 host->status &= ~NAND_STATUS_WP;
868
869 if (flash_status & FS_OP_ERR || (i == (num_cw - 1) &&
870 (flash_status &
871 FS_DEVICE_STS_ERR)))
872 host->status |= NAND_STATUS_FAIL;
873 }
874}
875
876static void post_command(struct qcom_nand_host *host, int command)
877{
878 struct nand_chip *chip = &host->chip;
879 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
880
881 switch (command) {
882 case NAND_CMD_READID:
883 memcpy(nandc->data_buffer, nandc->reg_read_buf,
884 nandc->buf_count);
885 break;
886 case NAND_CMD_PAGEPROG:
887 case NAND_CMD_ERASE1:
888 parse_erase_write_errors(host, command);
889 break;
890 default:
891 break;
892 }
893}
894
895/*
896 * Implements chip->cmdfunc. It's only used for a limited set of commands.
897 * The rest of the commands wouldn't be called by upper layers. For example,
898 * NAND_CMD_READOOB would never be called because we have our own versions
899 * of read_oob ops for nand_ecc_ctrl.
900 */
901static void qcom_nandc_command(struct mtd_info *mtd, unsigned int command,
902 int column, int page_addr)
903{
904 struct nand_chip *chip = mtd_to_nand(mtd);
905 struct qcom_nand_host *host = to_qcom_nand_host(chip);
906 struct nand_ecc_ctrl *ecc = &chip->ecc;
907 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
908 bool wait = false;
909 int ret = 0;
910
911 pre_command(host, command);
912
913 switch (command) {
914 case NAND_CMD_RESET:
915 ret = reset(host);
916 wait = true;
917 break;
918
919 case NAND_CMD_READID:
920 nandc->buf_count = 4;
921 ret = read_id(host, column);
922 wait = true;
923 break;
924
925 case NAND_CMD_PARAM:
926 ret = nandc_param(host);
927 wait = true;
928 break;
929
930 case NAND_CMD_ERASE1:
931 ret = erase_block(host, page_addr);
932 wait = true;
933 break;
934
935 case NAND_CMD_READ0:
936 /* we read the entire page for now */
937 WARN_ON(column != 0);
938
939 host->use_ecc = true;
940 set_address(host, 0, page_addr);
941 update_rw_regs(host, ecc->steps, true);
942 break;
943
944 case NAND_CMD_SEQIN:
945 WARN_ON(column != 0);
946 set_address(host, 0, page_addr);
947 break;
948
949 case NAND_CMD_PAGEPROG:
950 case NAND_CMD_STATUS:
951 case NAND_CMD_NONE:
952 default:
953 break;
954 }
955
956 if (ret) {
957 dev_err(nandc->dev, "failure executing command %d\n",
958 command);
959 free_descs(nandc);
960 return;
961 }
962
963 if (wait) {
964 ret = submit_descs(nandc);
965 if (ret)
966 dev_err(nandc->dev,
967 "failure submitting descs for command %d\n",
968 command);
969 }
970
971 free_descs(nandc);
972
973 post_command(host, command);
974}
975
976/*
977 * when using BCH ECC, the HW flags an error in NAND_FLASH_STATUS if it read
978 * an erased CW, and reports an erased CW in NAND_ERASED_CW_DETECT_STATUS.
979 *
980 * when using RS ECC, the HW reports the same errors when reading an erased CW,
981 * but it notifies that it is an erased CW by placing special characters at
982 * certain offsets in the buffer.
983 *
984 * verify if the page is erased or not, and fix up the page for RS ECC by
985 * replacing the special characters with 0xff.
986 */
987static bool erased_chunk_check_and_fixup(u8 *data_buf, int data_len)
988{
989 u8 empty1, empty2;
990
991 /*
992	 * an erased page flags an error in NAND_FLASH_STATUS; check if the page
993 * is erased by looking for 0x54s at offsets 3 and 175 from the
994 * beginning of each codeword
995 */
996
997 empty1 = data_buf[3];
998 empty2 = data_buf[175];
999
1000 /*
1001	 * if the erased codeword markers are present, override them with
1002	 * 0xffs
1003 */
1004 if ((empty1 == 0x54 && empty2 == 0xff) ||
1005 (empty1 == 0xff && empty2 == 0x54)) {
1006 data_buf[3] = 0xff;
1007 data_buf[175] = 0xff;
1008 }
1009
1010 /*
1011 * check if the entire chunk contains 0xffs or not. if it doesn't, then
1012 * restore the original values at the special offsets
1013 */
1014 if (memchr_inv(data_buf, 0xff, data_len)) {
1015 data_buf[3] = empty1;
1016 data_buf[175] = empty2;
1017
1018 return false;
1019 }
1020
1021 return true;
1022}
1023
1024struct read_stats {
1025 __le32 flash;
1026 __le32 buffer;
1027 __le32 erased_cw;
1028};
1029
1030/*
1031 * reads back status registers set by the controller to notify page read
1032 * errors. this is equivalent to what 'ecc->correct()' would do.
1033 */
1034static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf,
1035 u8 *oob_buf)
1036{
1037 struct nand_chip *chip = &host->chip;
1038 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1039 struct mtd_info *mtd = nand_to_mtd(chip);
1040 struct nand_ecc_ctrl *ecc = &chip->ecc;
1041 unsigned int max_bitflips = 0;
1042 struct read_stats *buf;
1043 int i;
1044
1045 buf = (struct read_stats *)nandc->reg_read_buf;
1046
1047 for (i = 0; i < ecc->steps; i++, buf++) {
1048 u32 flash, buffer, erased_cw;
1049 int data_len, oob_len;
1050
1051 if (i == (ecc->steps - 1)) {
1052 data_len = ecc->size - ((ecc->steps - 1) << 2);
1053 oob_len = ecc->steps << 2;
1054 } else {
1055 data_len = host->cw_data;
1056 oob_len = 0;
1057 }
1058
1059 flash = le32_to_cpu(buf->flash);
1060 buffer = le32_to_cpu(buf->buffer);
1061 erased_cw = le32_to_cpu(buf->erased_cw);
1062
1063 if (flash & (FS_OP_ERR | FS_MPU_ERR)) {
1064 bool erased;
1065
1066 /* ignore erased codeword errors */
1067 if (host->bch_enabled) {
1068 erased = (erased_cw & ERASED_CW) == ERASED_CW ?
1069 true : false;
1070 } else {
1071 erased = erased_chunk_check_and_fixup(data_buf,
1072 data_len);
1073 }
1074
1075 if (erased) {
1076 data_buf += data_len;
1077 if (oob_buf)
1078 oob_buf += oob_len + ecc->bytes;
1079 continue;
1080 }
1081
1082 if (buffer & BS_UNCORRECTABLE_BIT) {
1083 int ret, ecclen, extraooblen;
1084 void *eccbuf;
1085
1086 eccbuf = oob_buf ? oob_buf + oob_len : NULL;
1087 ecclen = oob_buf ? host->ecc_bytes_hw : 0;
1088 extraooblen = oob_buf ? oob_len : 0;
1089
1090 /*
1091 * make sure it isn't an erased page reported
1092 * as not-erased by HW because of a few bitflips
1093 */
1094 ret = nand_check_erased_ecc_chunk(data_buf,
1095 data_len, eccbuf, ecclen, oob_buf,
1096 extraooblen, ecc->strength);
1097 if (ret < 0) {
1098 mtd->ecc_stats.failed++;
1099 } else {
1100 mtd->ecc_stats.corrected += ret;
1101 max_bitflips =
1102 max_t(unsigned int, max_bitflips, ret);
1103 }
1104 }
1105 } else {
1106 unsigned int stat;
1107
1108 stat = buffer & BS_CORRECTABLE_ERR_MSK;
1109 mtd->ecc_stats.corrected += stat;
1110 max_bitflips = max(max_bitflips, stat);
1111 }
1112
1113 data_buf += data_len;
1114 if (oob_buf)
1115 oob_buf += oob_len + ecc->bytes;
1116 }
1117
1118 return max_bitflips;
1119}
1120
1121/*
1122 * helper to perform the actual page read operation, used by ecc->read_page(),
1123 * ecc->read_oob()
1124 */
1125static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
1126 u8 *oob_buf)
1127{
1128 struct nand_chip *chip = &host->chip;
1129 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1130 struct nand_ecc_ctrl *ecc = &chip->ecc;
1131 int i, ret;
1132
1133	config_nand_page_read(nandc);
1134
1135	/* queue cmd descs for each codeword */
1136 for (i = 0; i < ecc->steps; i++) {
1137 int data_size, oob_size;
1138
1139 if (i == (ecc->steps - 1)) {
1140 data_size = ecc->size - ((ecc->steps - 1) << 2);
1141 oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
1142 host->spare_bytes;
1143 } else {
1144 data_size = host->cw_data;
1145 oob_size = host->ecc_bytes_hw + host->spare_bytes;
1146 }
1147
1148		config_nand_cw_read(nandc);
1149
1150 if (data_buf)
1151 read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
1152 data_size);
1153
1154 /*
1155 * when ecc is enabled, the controller doesn't read the real
1156 * or dummy bad block markers in each chunk. To maintain a
1157 * consistent layout across RAW and ECC reads, we just
1158 * leave the real/dummy BBM offsets empty (i.e, filled with
1159 * 0xffs)
1160 */
1161 if (oob_buf) {
1162 int j;
1163
1164 for (j = 0; j < host->bbm_size; j++)
1165 *oob_buf++ = 0xff;
1166
1167 read_data_dma(nandc, FLASH_BUF_ACC + data_size,
1168 oob_buf, oob_size);
1169 }
1170
1171 if (data_buf)
1172 data_buf += data_size;
1173 if (oob_buf)
1174 oob_buf += oob_size;
1175 }
1176
1177 ret = submit_descs(nandc);
1178 if (ret)
1179 dev_err(nandc->dev, "failure to read page/oob\n");
1180
1181 free_descs(nandc);
1182
1183 return ret;
1184}
1185
1186/*
1187 * a helper that copies the last step/codeword of a page (containing free oob)
1188 * into our local buffer
1189 */
1190static int copy_last_cw(struct qcom_nand_host *host, int page)
1191{
1192 struct nand_chip *chip = &host->chip;
1193 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1194 struct nand_ecc_ctrl *ecc = &chip->ecc;
1195 int size;
1196 int ret;
1197
1198 clear_read_regs(nandc);
1199
1200 size = host->use_ecc ? host->cw_data : host->cw_size;
1201
1202 /* prepare a clean read buffer */
1203 memset(nandc->data_buffer, 0xff, size);
1204
1205 set_address(host, host->cw_size * (ecc->steps - 1), page);
1206 update_rw_regs(host, 1, true);
1207
1208	config_nand_single_cw_page_read(nandc);
1209
1210 read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size);
1211
1212 ret = submit_descs(nandc);
1213 if (ret)
1214 dev_err(nandc->dev, "failed to copy last codeword\n");
1215
1216 free_descs(nandc);
1217
1218 return ret;
1219}
1220
1221/* implements ecc->read_page() */
1222static int qcom_nandc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
1223 uint8_t *buf, int oob_required, int page)
1224{
1225 struct qcom_nand_host *host = to_qcom_nand_host(chip);
1226 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1227 u8 *data_buf, *oob_buf = NULL;
1228 int ret;
1229
1230 data_buf = buf;
1231 oob_buf = oob_required ? chip->oob_poi : NULL;
1232
1233 ret = read_page_ecc(host, data_buf, oob_buf);
1234 if (ret) {
1235 dev_err(nandc->dev, "failure to read page\n");
1236 return ret;
1237 }
1238
1239 return parse_read_errors(host, data_buf, oob_buf);
1240}
1241
1242/* implements ecc->read_page_raw() */
1243static int qcom_nandc_read_page_raw(struct mtd_info *mtd,
1244 struct nand_chip *chip, uint8_t *buf,
1245 int oob_required, int page)
1246{
1247 struct qcom_nand_host *host = to_qcom_nand_host(chip);
1248 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1249 u8 *data_buf, *oob_buf;
1250 struct nand_ecc_ctrl *ecc = &chip->ecc;
1251 int i, ret;
1252
1253 data_buf = buf;
1254 oob_buf = chip->oob_poi;
1255
1256 host->use_ecc = false;
1257 update_rw_regs(host, ecc->steps, true);
1258	config_nand_page_read(nandc);
1259
1260 for (i = 0; i < ecc->steps; i++) {
1261 int data_size1, data_size2, oob_size1, oob_size2;
1262 int reg_off = FLASH_BUF_ACC;
1263
1264 data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
1265 oob_size1 = host->bbm_size;
1266
1267 if (i == (ecc->steps - 1)) {
1268 data_size2 = ecc->size - data_size1 -
1269 ((ecc->steps - 1) << 2);
1270 oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
1271 host->spare_bytes;
1272 } else {
1273 data_size2 = host->cw_data - data_size1;
1274 oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
1275 }
1276
1277		config_nand_cw_read(nandc);
1278
1279 read_data_dma(nandc, reg_off, data_buf, data_size1);
1280 reg_off += data_size1;
1281 data_buf += data_size1;
1282
1283 read_data_dma(nandc, reg_off, oob_buf, oob_size1);
1284 reg_off += oob_size1;
1285 oob_buf += oob_size1;
1286
1287 read_data_dma(nandc, reg_off, data_buf, data_size2);
1288 reg_off += data_size2;
1289 data_buf += data_size2;
1290
1291 read_data_dma(nandc, reg_off, oob_buf, oob_size2);
1292 oob_buf += oob_size2;
1293 }
1294
1295 ret = submit_descs(nandc);
1296 if (ret)
1297 dev_err(nandc->dev, "failure to read raw page\n");
1298
1299 free_descs(nandc);
1300
1301 return 0;
1302}
1303
1304/* implements ecc->read_oob() */
1305static int qcom_nandc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
1306 int page)
1307{
1308 struct qcom_nand_host *host = to_qcom_nand_host(chip);
1309 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1310 struct nand_ecc_ctrl *ecc = &chip->ecc;
1311 int ret;
1312
1313 clear_read_regs(nandc);
1314
1315 host->use_ecc = true;
1316 set_address(host, 0, page);
1317 update_rw_regs(host, ecc->steps, true);
1318
1319 ret = read_page_ecc(host, NULL, chip->oob_poi);
1320 if (ret)
1321 dev_err(nandc->dev, "failure to read oob\n");
1322
1323 return ret;
1324}
1325
1326/* implements ecc->write_page() */
1327static int qcom_nandc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
1328 const uint8_t *buf, int oob_required, int page)
1329{
1330 struct qcom_nand_host *host = to_qcom_nand_host(chip);
1331 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1332 struct nand_ecc_ctrl *ecc = &chip->ecc;
1333 u8 *data_buf, *oob_buf;
1334 int i, ret;
1335
1336 clear_read_regs(nandc);
1337
1338 data_buf = (u8 *)buf;
1339 oob_buf = chip->oob_poi;
1340
1341 host->use_ecc = true;
1342 update_rw_regs(host, ecc->steps, false);
1343	config_nand_page_write(nandc);
1344
1345 for (i = 0; i < ecc->steps; i++) {
1346 int data_size, oob_size;
1347
1348 if (i == (ecc->steps - 1)) {
1349 data_size = ecc->size - ((ecc->steps - 1) << 2);
1350 oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
1351 host->spare_bytes;
1352 } else {
1353 data_size = host->cw_data;
1354 oob_size = ecc->bytes;
1355 }
1356
1357
1358 write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size);
1359
1360 /*
1361 * when ECC is enabled, we don't really need to write anything
1362 * to oob for the first n - 1 codewords since these oob regions
1363 * just contain ECC bytes that's written by the controller
1364 * itself. For the last codeword, we skip the bbm positions and
1365 * write to the free oob area.
1366 */
1367 if (i == (ecc->steps - 1)) {
1368 oob_buf += host->bbm_size;
1369
1370 write_data_dma(nandc, FLASH_BUF_ACC + data_size,
1371 oob_buf, oob_size);
1372 }
1373
1374		config_nand_cw_write(nandc);
1375
1376 data_buf += data_size;
1377 oob_buf += oob_size;
1378 }
1379
1380 ret = submit_descs(nandc);
1381 if (ret)
1382 dev_err(nandc->dev, "failure to write page\n");
1383
1384 free_descs(nandc);
1385
1386 return ret;
1387}
1388
1389/* implements ecc->write_page_raw() */
1390static int qcom_nandc_write_page_raw(struct mtd_info *mtd,
1391 struct nand_chip *chip, const uint8_t *buf,
1392 int oob_required, int page)
1393{
1394 struct qcom_nand_host *host = to_qcom_nand_host(chip);
1395 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1396 struct nand_ecc_ctrl *ecc = &chip->ecc;
1397 u8 *data_buf, *oob_buf;
1398 int i, ret;
1399
1400 clear_read_regs(nandc);
1401
1402 data_buf = (u8 *)buf;
1403 oob_buf = chip->oob_poi;
1404
1405 host->use_ecc = false;
1406 update_rw_regs(host, ecc->steps, false);
1407	config_nand_page_write(nandc);
1408
1409 for (i = 0; i < ecc->steps; i++) {
1410 int data_size1, data_size2, oob_size1, oob_size2;
1411 int reg_off = FLASH_BUF_ACC;
1412
1413 data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
1414 oob_size1 = host->bbm_size;
1415
1416 if (i == (ecc->steps - 1)) {
1417 data_size2 = ecc->size - data_size1 -
1418 ((ecc->steps - 1) << 2);
1419 oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
1420 host->spare_bytes;
1421 } else {
1422 data_size2 = host->cw_data - data_size1;
1423 oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
1424 }
1425
1426		write_data_dma(nandc, reg_off, data_buf, data_size1);
1427 reg_off += data_size1;
1428 data_buf += data_size1;
1429
1430 write_data_dma(nandc, reg_off, oob_buf, oob_size1);
1431 reg_off += oob_size1;
1432 oob_buf += oob_size1;
1433
1434 write_data_dma(nandc, reg_off, data_buf, data_size2);
1435 reg_off += data_size2;
1436 data_buf += data_size2;
1437
1438 write_data_dma(nandc, reg_off, oob_buf, oob_size2);
1439 oob_buf += oob_size2;
1440
1441		config_nand_cw_write(nandc);
1442	}
1443
1444 ret = submit_descs(nandc);
1445 if (ret)
1446 dev_err(nandc->dev, "failure to write raw page\n");
1447
1448 free_descs(nandc);
1449
1450 return ret;
1451}
1452
1453/*
1454 * implements ecc->write_oob()
1455 *
1456 * the NAND controller cannot write only data or only oob within a codeword,
1457 * since ecc is calculated for the combined codeword. we first copy the
1458 * entire contents for the last codeword(data + oob), replace the old oob
1459 * with the new one in chip->oob_poi, and then write the entire codeword.
1460 * this read-copy-write operation results in a slight performance loss.
1461 */
1462static int qcom_nandc_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
1463 int page)
1464{
1465 struct qcom_nand_host *host = to_qcom_nand_host(chip);
1466 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1467 struct nand_ecc_ctrl *ecc = &chip->ecc;
1468 u8 *oob = chip->oob_poi;
1469	int data_size, oob_size;
1470 int ret, status = 0;
1471
1472 host->use_ecc = true;
1473
1474 ret = copy_last_cw(host, page);
1475 if (ret)
1476 return ret;
1477
1478 clear_read_regs(nandc);
1479
1480 /* calculate the data and oob size for the last codeword/step */
1481 data_size = ecc->size - ((ecc->steps - 1) << 2);
1482	oob_size = mtd->oobavail;
1483
1484	/* override new oob content to last codeword */
1485	mtd_ooblayout_get_databytes(mtd, nandc->data_buffer + data_size, oob,
1486				    0, mtd->oobavail);
1487
1488	set_address(host, host->cw_size * (ecc->steps - 1), page);
1489	update_rw_regs(host, 1, false);
1490
1491	config_nand_page_write(nandc);
1492	write_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
1493		       data_size + oob_size);
1494	config_nand_cw_write(nandc);
1495
1496 ret = submit_descs(nandc);
1497
1498 free_descs(nandc);
1499
1500 if (ret) {
1501 dev_err(nandc->dev, "failure to write oob\n");
1502 return -EIO;
1503 }
1504
1505 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
1506
1507 status = chip->waitfunc(mtd, chip);
1508
1509 return status & NAND_STATUS_FAIL ? -EIO : 0;
1510}
1511
1512static int qcom_nandc_block_bad(struct mtd_info *mtd, loff_t ofs)
1513{
1514 struct nand_chip *chip = mtd_to_nand(mtd);
1515 struct qcom_nand_host *host = to_qcom_nand_host(chip);
1516 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1517 struct nand_ecc_ctrl *ecc = &chip->ecc;
1518 int page, ret, bbpos, bad = 0;
1519 u32 flash_status;
1520
1521 page = (int)(ofs >> chip->page_shift) & chip->pagemask;
1522
1523 /*
1524 * configure registers for a raw sub page read, the address is set to
1525 * the beginning of the last codeword, we don't care about reading ecc
1526 * portion of oob. we just want the first few bytes from this codeword
1527 * that contains the BBM
1528 */
1529 host->use_ecc = false;
1530
1531 ret = copy_last_cw(host, page);
1532 if (ret)
1533 goto err;
1534
1535 flash_status = le32_to_cpu(nandc->reg_read_buf[0]);
1536
1537 if (flash_status & (FS_OP_ERR | FS_MPU_ERR)) {
1538 dev_warn(nandc->dev, "error when trying to read BBM\n");
1539 goto err;
1540 }
1541
1542 bbpos = mtd->writesize - host->cw_size * (ecc->steps - 1);
1543
1544 bad = nandc->data_buffer[bbpos] != 0xff;
1545
1546 if (chip->options & NAND_BUSWIDTH_16)
1547 bad = bad || (nandc->data_buffer[bbpos + 1] != 0xff);
1548err:
1549 return bad;
1550}
1551
1552static int qcom_nandc_block_markbad(struct mtd_info *mtd, loff_t ofs)
1553{
1554 struct nand_chip *chip = mtd_to_nand(mtd);
1555 struct qcom_nand_host *host = to_qcom_nand_host(chip);
1556 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1557 struct nand_ecc_ctrl *ecc = &chip->ecc;
1558 int page, ret, status = 0;
1559
1560 clear_read_regs(nandc);
1561
1562 /*
1563 * to mark the BBM as bad, we flash the entire last codeword with 0s.
1564 * we don't care about the rest of the content in the codeword since
1565 * we aren't going to use this block again
1566 */
1567 memset(nandc->data_buffer, 0x00, host->cw_size);
1568
1569 page = (int)(ofs >> chip->page_shift) & chip->pagemask;
1570
1571 /* prepare write */
1572 host->use_ecc = false;
1573 set_address(host, host->cw_size * (ecc->steps - 1), page);
1574 update_rw_regs(host, 1, false);
1575
1576	config_nand_page_write(nandc);
1577	write_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, host->cw_size);
1578	config_nand_cw_write(nandc);
1579
1580 ret = submit_descs(nandc);
1581
1582 free_descs(nandc);
1583
1584 if (ret) {
1585 dev_err(nandc->dev, "failure to update BBM\n");
1586 return -EIO;
1587 }
1588
1589 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
1590
1591 status = chip->waitfunc(mtd, chip);
1592
1593 return status & NAND_STATUS_FAIL ? -EIO : 0;
1594}
1595
1596/*
1597 * the three functions below implement chip->read_byte(), chip->read_buf()
1598 * and chip->write_buf() respectively. these aren't used for
1599 * reading/writing page data, they are used for smaller data like reading
1600 * id, status etc
1601 */
1602static uint8_t qcom_nandc_read_byte(struct mtd_info *mtd)
1603{
1604 struct nand_chip *chip = mtd_to_nand(mtd);
1605 struct qcom_nand_host *host = to_qcom_nand_host(chip);
1606 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1607 u8 *buf = nandc->data_buffer;
1608 u8 ret = 0x0;
1609
1610 if (host->last_command == NAND_CMD_STATUS) {
1611 ret = host->status;
1612
1613 host->status = NAND_STATUS_READY | NAND_STATUS_WP;
1614
1615 return ret;
1616 }
1617
1618 if (nandc->buf_start < nandc->buf_count)
1619 ret = buf[nandc->buf_start++];
1620
1621 return ret;
1622}
1623
1624static void qcom_nandc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1625{
1626 struct nand_chip *chip = mtd_to_nand(mtd);
1627 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1628 int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
1629
1630 memcpy(buf, nandc->data_buffer + nandc->buf_start, real_len);
1631 nandc->buf_start += real_len;
1632}
1633
1634static void qcom_nandc_write_buf(struct mtd_info *mtd, const uint8_t *buf,
1635 int len)
1636{
1637 struct nand_chip *chip = mtd_to_nand(mtd);
1638 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1639 int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
1640
1641 memcpy(nandc->data_buffer + nandc->buf_start, buf, real_len);
1642
1643 nandc->buf_start += real_len;
1644}
1645
1646/* we support only one external chip for now */
1647static void qcom_nandc_select_chip(struct mtd_info *mtd, int chipnr)
1648{
1649 struct nand_chip *chip = mtd_to_nand(mtd);
1650 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1651
1652 if (chipnr <= 0)
1653 return;
1654
1655 dev_warn(nandc->dev, "invalid chip select\n");
1656}
1657
1658/*
1659 * NAND controller page layout info
1660 *
1661 * Layout with ECC enabled:
1662 *
1663 * |----------------------| |---------------------------------|
1664 * | xx.......yy| | *********xx.......yy|
1665 * | DATA xx..ECC..yy| | DATA **SPARE**xx..ECC..yy|
1666 * | (516) xx.......yy| | (516-n*4) **(n*4)**xx.......yy|
1667 * | xx.......yy| | *********xx.......yy|
1668 * |----------------------| |---------------------------------|
1669 * codeword 1,2..n-1 codeword n
1670 * <---(528/532 Bytes)--> <-------(528/532 Bytes)--------->
1671 *
1672 * n = Number of codewords in the page
1673 * . = ECC bytes
1674 * * = Spare/free bytes
1675 * x = Unused byte(s)
1676 * y = Reserved byte(s)
1677 *
1678 * 2K page: n = 4, spare = 16 bytes
1679 * 4K page: n = 8, spare = 32 bytes
1680 * 8K page: n = 16, spare = 64 bytes
1681 *
1682 * the qcom nand controller operates at a sub page/codeword level. each
1683 * codeword is 528 and 532 bytes for 4 bit and 8 bit ECC modes respectively.
1684 * the number of ECC bytes vary based on the ECC strength and the bus width.
1685 *
1686 * the first n - 1 codewords contains 516 bytes of user data, the remaining
1687 * 12/16 bytes consist of ECC and reserved data. The nth codeword contains
1688 * both user data and spare(oobavail) bytes that sum up to 516 bytes.
1689 *
1690 * When we access a page with ECC enabled, the reserved bytes(s) are not
1691 * accessible at all. When reading, we fill up these unreadable positions
1692 * with 0xffs. When writing, the controller skips writing the inaccessible
1693 * bytes.
1694 *
1695 * Layout with ECC disabled:
1696 *
1697 * |------------------------------| |---------------------------------------|
1698 * | yy xx.......| | bb *********xx.......|
1699 * | DATA1 yy DATA2 xx..ECC..| | DATA1 bb DATA2 **SPARE**xx..ECC..|
1700 * | (size1) yy (size2) xx.......| | (size1) bb (size2) **(n*4)**xx.......|
1701 * | yy xx.......| | bb *********xx.......|
1702 * |------------------------------| |---------------------------------------|
1703 * codeword 1,2..n-1 codeword n
1704 * <-------(528/532 Bytes)------> <-----------(528/532 Bytes)----------->
1705 *
1706 * n = Number of codewords in the page
1707 * . = ECC bytes
1708 * * = Spare/free bytes
1709 * x = Unused byte(s)
1710 * y = Dummy Bad Block byte(s)
1711 * b = Real Bad Block byte(s)
1712 * size1/size2 = function of codeword size and 'n'
1713 *
1714 * when the ECC block is disabled, one reserved byte (or two for 16 bit bus
1715 * width) is now accessible. For the first n - 1 codewords, these are dummy Bad
1716 * Block Markers. In the last codeword, this position contains the real BBM
1717 *
1718 * In order to have a consistent layout between RAW and ECC modes, we assume
1719 * the following OOB layout arrangement:
1720 *
1721 * |-----------| |--------------------|
1722 * |yyxx.......| |bb*********xx.......|
1723 * |yyxx..ECC..| |bb*FREEOOB*xx..ECC..|
1724 * |yyxx.......| |bb*********xx.......|
1725 * |yyxx.......| |bb*********xx.......|
1726 * |-----------| |--------------------|
1727 * first n - 1 nth OOB region
1728 * OOB regions
1729 *
1730 * n = Number of codewords in the page
1731 * . = ECC bytes
1732 * * = FREE OOB bytes
1733 * y = Dummy bad block byte(s) (inaccessible when ECC enabled)
1734 * x = Unused byte(s)
1735 * b = Real bad block byte(s) (inaccessible when ECC enabled)
1736 *
1737 * This layout is read as is when ECC is disabled. When ECC is enabled, the
1738 * inaccessible Bad Block byte(s) are ignored when we write to a page/oob,
1739 * and assumed as 0xffs when we read a page/oob. The ECC, unused and
1740 * dummy/real bad block bytes are grouped as ecc bytes (i.e., ecc->bytes is
1741 * the sum of the three).
1742 */
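/*
 * Worked example (illustrative, 2K page, 4 bit ECC, 8 bit bus): n = 4
 * codewords of 528 bytes each. Codewords 1..3 each carry 516 bytes of user
 * data; the 4th carries 516 - 4 * 4 = 500 data bytes plus the 16 spare/free
 * OOB bytes, so 3 * 516 + 500 = 2048 bytes of page data in total.
 */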
1743static int qcom_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
1744				   struct mtd_oob_region *oobregion)
1745{
1746	struct nand_chip *chip = mtd_to_nand(mtd);
1747	struct qcom_nand_host *host = to_qcom_nand_host(chip);
1748	struct nand_ecc_ctrl *ecc = &chip->ecc;
1749
1750	if (section > 1)
1751		return -ERANGE;
1752
1753	if (!section) {
1754		oobregion->length = (ecc->bytes * (ecc->steps - 1)) +
1755				    host->bbm_size;
1756		oobregion->offset = 0;
1757	} else {
1758		oobregion->length = host->ecc_bytes_hw + host->spare_bytes;
1759		oobregion->offset = mtd->oobsize - oobregion->length;
1760	}
1761
1762	return 0;
1763}
1764
1765static int qcom_nand_ooblayout_free(struct mtd_info *mtd, int section,
1766 struct mtd_oob_region *oobregion)
1767{
1768 struct nand_chip *chip = mtd_to_nand(mtd);
1769 struct qcom_nand_host *host = to_qcom_nand_host(chip);
1770 struct nand_ecc_ctrl *ecc = &chip->ecc;
1771
1772 if (section)
1773 return -ERANGE;
1774
1775 oobregion->length = ecc->steps * 4;
1776 oobregion->offset = ((ecc->steps - 1) * ecc->bytes) + host->bbm_size;
1777
1778 return 0;
1779}
1780
1781static const struct mtd_ooblayout_ops qcom_nand_ooblayout_ops = {
1782 .ecc = qcom_nand_ooblayout_ecc,
1783 .free = qcom_nand_ooblayout_free,
1784};
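/*
 * Illustrative layout, assuming the same 2K page example (64 byte OOB,
 * ecc->bytes = 12, bbm_size = 1): ECC section 0 at offset 0, length
 * 3 * 12 + 1 = 37; free OOB at offset 37, length 4 * 4 = 16; ECC section 1
 * at offset 53, length ecc_bytes_hw + spare_bytes = 11.
 */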
1785
1786static int qcom_nand_host_setup(struct qcom_nand_host *host)
1787{
1788 struct nand_chip *chip = &host->chip;
1789 struct mtd_info *mtd = nand_to_mtd(chip);
1790 struct nand_ecc_ctrl *ecc = &chip->ecc;
1791 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1792 int cwperpage, bad_block_byte;
1793 bool wide_bus;
1794 int ecc_mode = 1;
1795
1796 /*
1797 * the controller requires that each ECC step consist of 512 bytes of data.
1798 * bail out if the DT has populated a wrong step size.
1799 */
1800 if (ecc->size != NANDC_STEP_SIZE) {
1801 dev_err(nandc->dev, "invalid ecc size\n");
1802 return -EINVAL;
1803 }
1804
1805 wide_bus = chip->options & NAND_BUSWIDTH_16 ? true : false;
1806
1807 if (ecc->strength >= 8) {
1808 /* 8 bit ECC defaults to BCH ECC on all platforms */
1809 host->bch_enabled = true;
1810 ecc_mode = 1;
1811
1812 if (wide_bus) {
1813 host->ecc_bytes_hw = 14;
1814 host->spare_bytes = 0;
1815 host->bbm_size = 2;
1816 } else {
1817 host->ecc_bytes_hw = 13;
1818 host->spare_bytes = 2;
1819 host->bbm_size = 1;
1820 }
1821 } else {
1822 /*
1823 * if the controller supports BCH for 4 bit ECC, it uses fewer
1824 * bytes for ECC. If RS is used, the number of ECC bytes is
1825 * always 10
1826 */
1827 if (nandc->ecc_modes & ECC_BCH_4BIT) {
1828 /* BCH */
1829 host->bch_enabled = true;
1830 ecc_mode = 0;
1831
1832 if (wide_bus) {
1833 host->ecc_bytes_hw = 8;
1834 host->spare_bytes = 2;
1835 host->bbm_size = 2;
1836 } else {
1837 host->ecc_bytes_hw = 7;
1838 host->spare_bytes = 4;
1839 host->bbm_size = 1;
1840 }
1841 } else {
1842 /* RS */
1843 host->ecc_bytes_hw = 10;
1844
1845 if (wide_bus) {
1846 host->spare_bytes = 0;
1847 host->bbm_size = 2;
1848 } else {
1849 host->spare_bytes = 1;
1850 host->bbm_size = 1;
1851 }
1852 }
1853 }
1854
1855 /*
1856 * we consider ecc->bytes as the sum of all the non-data content in a
1857 * step. It gives us a clean representation of the oob area (even if
1858 * not all of the bytes are used for ECC). It is always 16 bytes for
1859 * 8 bit ECC and 12 bytes for 4 bit ECC
1860 */
1861 ecc->bytes = host->ecc_bytes_hw + host->spare_bytes + host->bbm_size;
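	/*
	 * For reference, the sums produced by the selection above (these are
	 * simply the cases already handled in this function, restated):
	 *
	 *	8 bit BCH, x8  bus: 13 + 2 + 1 = 16
	 *	8 bit BCH, x16 bus: 14 + 0 + 2 = 16
	 *	4 bit BCH, x8  bus:  7 + 4 + 1 = 12
	 *	4 bit BCH, x16 bus:  8 + 2 + 2 = 12
	 *	4 bit RS,  x8  bus: 10 + 1 + 1 = 12
	 *	4 bit RS,  x16 bus: 10 + 0 + 2 = 12
	 */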
1862
1863 ecc->read_page = qcom_nandc_read_page;
1864 ecc->read_page_raw = qcom_nandc_read_page_raw;
1865 ecc->read_oob = qcom_nandc_read_oob;
1866 ecc->write_page = qcom_nandc_write_page;
1867 ecc->write_page_raw = qcom_nandc_write_page_raw;
1868 ecc->write_oob = qcom_nandc_write_oob;
1869
1870 ecc->mode = NAND_ECC_HW;
1871
Boris Brezillon421e81c2016-03-18 17:54:27 +01001872 mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301873
1874 cwperpage = mtd->writesize / ecc->size;
1875
1876 /*
1877 * DATA_UD_BYTES varies based on whether the read/write command protects
1878 * spare data with ECC too. We protect spare data by default, so we set
1879 * it to main + spare data, which are 512 and 4 bytes respectively.
1880 */
1881 host->cw_data = 516;
1882
1883 /*
1884 * total bytes in a step, either 528 bytes for 4 bit ECC, or 532 bytes
1885 * for 8 bit ECC
1886 */
1887 host->cw_size = host->cw_data + ecc->bytes;
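	/*
	 * Illustrative arithmetic (assuming a 2048+64 byte page with 4 bit
	 * ECC): cw_size = 516 + 12 = 528 and cwperpage = 2048 / 512 = 4, so
	 * 4 * 528 = 2112 = 2048 + 64 and the raw codewords exactly cover the
	 * main area plus OOB. With 8 bit ECC, cw_size = 516 + 16 = 532.
	 */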
1888
1889 if (ecc->bytes * (mtd->writesize / ecc->size) > mtd->oobsize) {
1890 dev_err(nandc->dev, "ecc data doesn't fit in OOB area\n");
1891 return -EINVAL;
1892 }
1893
1894 bad_block_byte = mtd->writesize - host->cw_size * (cwperpage - 1) + 1;
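	/*
	 * Worked example for the computation above (assuming the same 2048
	 * byte page with cw_size = 528): the last codeword starts 3 * 528 =
	 * 1584 bytes into the raw page, so the BBM at the start of the OOB
	 * (page offset 2048) lies 2048 - 1584 = 464 bytes into that codeword;
	 * the register field appears to expect a 1-based index, hence the
	 * + 1 (465 here).
	 */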
1895
1896 host->cfg0 = (cwperpage - 1) << CW_PER_PAGE
1897 | host->cw_data << UD_SIZE_BYTES
1898 | 0 << DISABLE_STATUS_AFTER_WRITE
1899 | 5 << NUM_ADDR_CYCLES
1900 | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_RS
1901 | 0 << STATUS_BFR_READ
1902 | 1 << SET_RD_MODE_AFTER_STATUS
1903 | host->spare_bytes << SPARE_SIZE_BYTES;
1904
1905 host->cfg1 = 7 << NAND_RECOVERY_CYCLES
1906 | 0 << CS_ACTIVE_BSY
1907 | bad_block_byte << BAD_BLOCK_BYTE_NUM
1908 | 0 << BAD_BLOCK_IN_SPARE_AREA
1909 | 2 << WR_RD_BSY_GAP
1910 | wide_bus << WIDE_FLASH
1911 | host->bch_enabled << ENABLE_BCH_ECC;
1912
1913 host->cfg0_raw = (cwperpage - 1) << CW_PER_PAGE
1914 | host->cw_size << UD_SIZE_BYTES
1915 | 5 << NUM_ADDR_CYCLES
1916 | 0 << SPARE_SIZE_BYTES;
1917
1918 host->cfg1_raw = 7 << NAND_RECOVERY_CYCLES
1919 | 0 << CS_ACTIVE_BSY
1920 | 17 << BAD_BLOCK_BYTE_NUM
1921 | 1 << BAD_BLOCK_IN_SPARE_AREA
1922 | 2 << WR_RD_BSY_GAP
1923 | wide_bus << WIDE_FLASH
1924 | 1 << DEV0_CFG1_ECC_DISABLE;
1925
Abhishek Sahu10777de2017-08-03 17:56:39 +02001926 host->ecc_bch_cfg = !host->bch_enabled << ECC_CFG_ECC_DISABLE
Archit Tanejac76b78d2016-02-03 14:29:50 +05301927 | 0 << ECC_SW_RESET
1928 | host->cw_data << ECC_NUM_DATA_BYTES
1929 | 1 << ECC_FORCE_CLK_OPEN
1930 | ecc_mode << ECC_MODE
1931 | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;
1932
1933 host->ecc_buf_cfg = 0x203 << NUM_STEPS;
1934
1935 host->clrflashstatus = FS_READY_BSY_N;
1936 host->clrreadstatus = 0xc0;
1937
1938 dev_dbg(nandc->dev,
1939 "cfg0 %x cfg1 %x ecc_buf_cfg %x ecc_bch cfg %x cw_size %d cw_data %d strength %d parity_bytes %d steps %d\n",
1940 host->cfg0, host->cfg1, host->ecc_buf_cfg, host->ecc_bch_cfg,
1941 host->cw_size, host->cw_data, ecc->strength, ecc->bytes,
1942 cwperpage);
1943
1944 return 0;
1945}
1946
1947static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
1948{
1949 int ret;
1950
1951 ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
1952 if (ret) {
1953 dev_err(nandc->dev, "failed to set DMA mask\n");
1954 return ret;
1955 }
1956
1957 /*
1958 * we use the internal buffer for reading ONFI params, reading small
1959 * data like ID and status, and performing read-copy-write operations
1960 * when partially writing to a codeword. 532 is the maximum possible
1961 * size of a codeword for our nand controller
1962 */
1963 nandc->buf_size = 532;
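	/*
	 * Note: 532 follows from the geometry set up in qcom_nand_host_setup()
	 * above: 516 bytes of data + spare per step, plus at most 16 non-data
	 * bytes per step (the 8 bit ECC case).
	 */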
1964
1965 nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size,
1966 GFP_KERNEL);
1967 if (!nandc->data_buffer)
1968 return -ENOMEM;
1969
1970 nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs),
1971 GFP_KERNEL);
1972 if (!nandc->regs)
1973 return -ENOMEM;
1974
1975 nandc->reg_read_buf = devm_kzalloc(nandc->dev,
1976 MAX_REG_RD * sizeof(*nandc->reg_read_buf),
1977 GFP_KERNEL);
1978 if (!nandc->reg_read_buf)
1979 return -ENOMEM;
1980
1981 nandc->chan = dma_request_slave_channel(nandc->dev, "rxtx");
1982 if (!nandc->chan) {
1983 dev_err(nandc->dev, "failed to request slave channel\n");
1984 return -ENODEV;
1985 }
1986
1987 INIT_LIST_HEAD(&nandc->desc_list);
1988 INIT_LIST_HEAD(&nandc->host_list);
1989
Marc Gonzalezd45bc582016-07-27 11:23:52 +02001990 nand_hw_control_init(&nandc->controller);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301991
1992 return 0;
1993}
1994
1995static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
1996{
1997 dma_release_channel(nandc->chan);
1998}
1999
2000/* one-time setup of a few nand controller registers */
2001static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
2002{
2003 /* kill onenand */
2004 nandc_write(nandc, SFLASHC_BURST_CFG, 0);
Abhishek Sahud8a9b322017-08-11 17:09:16 +05302005 nandc_write(nandc, NAND_DEV_CMD_VLD, NAND_DEV_CMD_VLD_VAL);
Archit Tanejac76b78d2016-02-03 14:29:50 +05302006
2007 /* enable ADM DMA */
2008 nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
2009
2010 /* save the original values of these registers */
2011 nandc->cmd1 = nandc_read(nandc, NAND_DEV_CMD1);
Abhishek Sahud8a9b322017-08-11 17:09:16 +05302012 nandc->vld = NAND_DEV_CMD_VLD_VAL;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302013
2014 return 0;
2015}
2016
2017static int qcom_nand_host_init(struct qcom_nand_controller *nandc,
2018 struct qcom_nand_host *host,
2019 struct device_node *dn)
2020{
2021 struct nand_chip *chip = &host->chip;
2022 struct mtd_info *mtd = nand_to_mtd(chip);
2023 struct device *dev = nandc->dev;
2024 int ret;
2025
2026 ret = of_property_read_u32(dn, "reg", &host->cs);
2027 if (ret) {
2028 dev_err(dev, "can't get chip-select\n");
2029 return -ENXIO;
2030 }
2031
2032 nand_set_flash_node(chip, dn);
2033 mtd->name = devm_kasprintf(dev, GFP_KERNEL, "qcom_nand.%d", host->cs);
2034 mtd->owner = THIS_MODULE;
2035 mtd->dev.parent = dev;
2036
2037 chip->cmdfunc = qcom_nandc_command;
2038 chip->select_chip = qcom_nandc_select_chip;
2039 chip->read_byte = qcom_nandc_read_byte;
2040 chip->read_buf = qcom_nandc_read_buf;
2041 chip->write_buf = qcom_nandc_write_buf;
Boris Brezillon4a78cc62017-05-26 17:10:15 +02002042 chip->onfi_set_features = nand_onfi_get_set_features_notsupp;
2043 chip->onfi_get_features = nand_onfi_get_set_features_notsupp;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302044
2045 /*
2046 * the bad block marker is readable only when we read the last codeword
2047 * of a page with ECC disabled. currently, the nand_base and nand_bbt
2048 * helpers don't allow us to read the BBM from a nand chip with ECC
2049 * disabled (MTD_OPS_PLACE_OOB is set by default). use the block_bad
2050 * and block_markbad helpers until we permanently switch to using
2051 * MTD_OPS_RAW for all drivers (with the help of badblockbits)
2052 */
2053 chip->block_bad = qcom_nandc_block_bad;
2054 chip->block_markbad = qcom_nandc_block_markbad;
2055
2056 chip->controller = &nandc->controller;
2057 chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USE_BOUNCE_BUFFER |
2058 NAND_SKIP_BBTSCAN;
2059
2060 /* set up initial status value */
2061 host->status = NAND_STATUS_READY | NAND_STATUS_WP;
2062
2063 ret = nand_scan_ident(mtd, 1, NULL);
2064 if (ret)
2065 return ret;
2066
2067 ret = qcom_nand_host_setup(host);
Abhishek Sahu89f51272017-07-19 17:17:58 +05302068
2069 return ret;
2070}
2071
2072static int qcom_nand_mtd_register(struct qcom_nand_controller *nandc,
2073 struct qcom_nand_host *host,
2074 struct device_node *dn)
2075{
2076 struct nand_chip *chip = &host->chip;
2077 struct mtd_info *mtd = nand_to_mtd(chip);
2078 int ret;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302079
2080 ret = nand_scan_tail(mtd);
2081 if (ret)
2082 return ret;
2083
Abhishek Sahu89f51272017-07-19 17:17:58 +05302084 ret = mtd_device_register(mtd, NULL, 0);
2085 if (ret)
2086 nand_cleanup(mtd_to_nand(mtd));
2087
2088 return ret;
2089}
2090
2091static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
2092{
2093 struct device *dev = nandc->dev;
2094 struct device_node *dn = dev->of_node, *child;
2095 struct qcom_nand_host *host, *tmp;
2096 int ret;
2097
2098 for_each_available_child_of_node(dn, child) {
2099 host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
2100 if (!host) {
2101 of_node_put(child);
2102 return -ENOMEM;
2103 }
2104
2105 ret = qcom_nand_host_init(nandc, host, child);
2106 if (ret) {
2107 devm_kfree(dev, host);
2108 continue;
2109 }
2110
2111 list_add_tail(&host->node, &nandc->host_list);
2112 }
2113
2114 if (list_empty(&nandc->host_list))
2115 return -ENODEV;
2116
2117 list_for_each_entry_safe(host, tmp, &nandc->host_list, node) {
2118 ret = qcom_nand_mtd_register(nandc, host, child);
2119 if (ret) {
2120 list_del(&host->node);
2121 devm_kfree(dev, host);
2122 }
2123 }
2124
2125 if (list_empty(&nandc->host_list))
2126 return -ENODEV;
2127
2128 return 0;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302129}
2130
2131/* parse custom DT properties here */
2132static int qcom_nandc_parse_dt(struct platform_device *pdev)
2133{
2134 struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
2135 struct device_node *np = nandc->dev->of_node;
2136 int ret;
2137
2138 ret = of_property_read_u32(np, "qcom,cmd-crci", &nandc->cmd_crci);
2139 if (ret) {
2140 dev_err(nandc->dev, "command CRCI unspecified\n");
2141 return ret;
2142 }
2143
2144 ret = of_property_read_u32(np, "qcom,data-crci", &nandc->data_crci);
2145 if (ret) {
2146 dev_err(nandc->dev, "data CRCI unspecified\n");
2147 return ret;
2148 }
2149
2150 return 0;
2151}
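/*
 * For orientation only, a sketch of the device tree shape this parser and
 * the probe path below expect. The qcom,cmd-crci and qcom,data-crci
 * properties, the "core"/"aon" clock names, the "rxtx" DMA channel and the
 * per-chip "reg" (chip select) come from this file; all values, node names
 * and phandles are purely illustrative assumptions, and the generic
 * nand-ecc-* properties are assumed to be parsed by the NAND core:
 *
 *	nand-controller@1ac00000 {
 *		compatible = "qcom,ipq806x-nand";
 *		reg = <0x1ac00000 0x800>;
 *		clocks = <&gcc EBI2_CLK>, <&gcc EBI2_AON_CLK>;
 *		clock-names = "core", "aon";
 *		dmas = <&adm_dma 3>;
 *		dma-names = "rxtx";
 *		qcom,cmd-crci = <15>;
 *		qcom,data-crci = <3>;
 *
 *		nand@0 {
 *			reg = <0>;
 *			nand-ecc-strength = <4>;
 *			nand-ecc-step-size = <512>;
 *		};
 *	};
 */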
2152
2153static int qcom_nandc_probe(struct platform_device *pdev)
2154{
2155 struct qcom_nand_controller *nandc;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302156 const void *dev_data;
2157 struct device *dev = &pdev->dev;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302158 struct resource *res;
2159 int ret;
2160
2161 nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc), GFP_KERNEL);
2162 if (!nandc)
2163 return -ENOMEM;
2164
2165 platform_set_drvdata(pdev, nandc);
2166 nandc->dev = dev;
2167
2168 dev_data = of_device_get_match_data(dev);
2169 if (!dev_data) {
2170 dev_err(&pdev->dev, "failed to get device data\n");
2171 return -ENODEV;
2172 }
2173
2174 nandc->ecc_modes = (unsigned long)dev_data;
2175
2176 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2177 nandc->base = devm_ioremap_resource(dev, res);
2178 if (IS_ERR(nandc->base))
2179 return PTR_ERR(nandc->base);
2180
2181 nandc->base_dma = phys_to_dma(dev, (phys_addr_t)res->start);
2182
2183 nandc->core_clk = devm_clk_get(dev, "core");
2184 if (IS_ERR(nandc->core_clk))
2185 return PTR_ERR(nandc->core_clk);
2186
2187 nandc->aon_clk = devm_clk_get(dev, "aon");
2188 if (IS_ERR(nandc->aon_clk))
2189 return PTR_ERR(nandc->aon_clk);
2190
2191 ret = qcom_nandc_parse_dt(pdev);
2192 if (ret)
2193 return ret;
2194
2195 ret = qcom_nandc_alloc(nandc);
2196 if (ret)
2197 return ret;
2198
2199 ret = clk_prepare_enable(nandc->core_clk);
2200 if (ret)
2201 goto err_core_clk;
2202
2203 ret = clk_prepare_enable(nandc->aon_clk);
2204 if (ret)
2205 goto err_aon_clk;
2206
2207 ret = qcom_nandc_setup(nandc);
2208 if (ret)
2209 goto err_setup;
2210
Abhishek Sahu89f51272017-07-19 17:17:58 +05302211 ret = qcom_probe_nand_devices(nandc);
2212 if (ret)
2213 goto err_setup;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302214
2215 return 0;
2216
Archit Tanejac76b78d2016-02-03 14:29:50 +05302217err_setup:
2218 clk_disable_unprepare(nandc->aon_clk);
2219err_aon_clk:
2220 clk_disable_unprepare(nandc->core_clk);
2221err_core_clk:
2222 qcom_nandc_unalloc(nandc);
2223
2224 return ret;
2225}
2226
2227static int qcom_nandc_remove(struct platform_device *pdev)
2228{
2229 struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
2230 struct qcom_nand_host *host;
2231
2232 list_for_each_entry(host, &nandc->host_list, node)
2233 nand_release(nand_to_mtd(&host->chip));
2234
2235 qcom_nandc_unalloc(nandc);
2236
2237 clk_disable_unprepare(nandc->aon_clk);
2238 clk_disable_unprepare(nandc->core_clk);
2239
2240 return 0;
2241}
2242
2243#define EBI2_NANDC_ECC_MODES (ECC_RS_4BIT | ECC_BCH_8BIT)
2244
2245/*
2246 * data will hold a struct pointer containing more differences once we support
2247 * more controller variants
2248 */
2249static const struct of_device_id qcom_nandc_of_match[] = {
2250 { .compatible = "qcom,ipq806x-nand",
2251 .data = (void *)EBI2_NANDC_ECC_MODES,
2252 },
2253 {}
2254};
2255MODULE_DEVICE_TABLE(of, qcom_nandc_of_match);
2256
2257static struct platform_driver qcom_nandc_driver = {
2258 .driver = {
2259 .name = "qcom-nandc",
2260 .of_match_table = qcom_nandc_of_match,
2261 },
2262 .probe = qcom_nandc_probe,
2263 .remove = qcom_nandc_remove,
2264};
2265module_platform_driver(qcom_nandc_driver);
2266
2267MODULE_AUTHOR("Archit Taneja <architt@codeaurora.org>");
2268MODULE_DESCRIPTION("Qualcomm NAND Controller driver");
2269MODULE_LICENSE("GPL v2");