blob: d24e67b951671fa1ad07ac1192986fa2f4e00a89 [file] [log] [blame]
Boris Brezillonf88fc122017-03-16 09:02:40 +01001/*
2 * Copyright 2017 ATMEL
3 * Copyright 2017 Free Electrons
4 *
5 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
6 *
7 * Derived from the atmel_nand.c driver which contained the following
8 * copyrights:
9 *
10 * Copyright 2003 Rick Bronson
11 *
12 * Derived from drivers/mtd/nand/autcpu12.c
13 * Copyright 2001 Thomas Gleixner (gleixner@autronix.de)
14 *
15 * Derived from drivers/mtd/spia.c
16 * Copyright 2000 Steven J. Hill (sjhill@cotw.com)
17 *
18 *
19 * Add Hardware ECC support for AT91SAM9260 / AT91SAM9263
20 * Richard Genoud (richard.genoud@gmail.com), Adeneo Copyright 2007
21 *
22 * Derived from Das U-Boot source code
23 * (u-boot-1.1.5/board/atmel/at91sam9263ek/nand.c)
24 * Copyright 2006 ATMEL Rousset, Lacressonniere Nicolas
25 *
26 * Add Programmable Multibit ECC support for various AT91 SoC
27 * Copyright 2012 ATMEL, Hong Xu
28 *
29 * Add Nand Flash Controller support for SAMA5 SoC
30 * Copyright 2013 ATMEL, Josh Wu (josh.wu@atmel.com)
31 *
32 * This program is free software; you can redistribute it and/or modify
33 * it under the terms of the GNU General Public License version 2 as
34 * published by the Free Software Foundation.
35 *
36 * A few words about the naming convention in this file. This convention
37 * applies to structure and function names.
38 *
39 * Prefixes:
40 *
41 * - atmel_nand_: all generic structures/functions
42 * - atmel_smc_nand_: all structures/functions specific to the SMC interface
43 * (at91sam9 and avr32 SoCs)
44 * - atmel_hsmc_nand_: all structures/functions specific to the HSMC interface
45 * (sama5 SoCs and later)
46 * - atmel_nfc_: all structures/functions used to manipulate the NFC sub-block
47 * that is available in the HSMC block
48 * - <soc>_nand_: all SoC specific structures/functions
49 */
50
51#include <linux/clk.h>
52#include <linux/dma-mapping.h>
53#include <linux/dmaengine.h>
54#include <linux/genalloc.h>
55#include <linux/gpio.h>
56#include <linux/gpio/consumer.h>
57#include <linux/interrupt.h>
58#include <linux/mfd/syscon.h>
59#include <linux/mfd/syscon/atmel-matrix.h>
Boris Brezillonf9ce2ed2017-03-16 09:35:59 +010060#include <linux/mfd/syscon/atmel-smc.h>
Boris Brezillonf88fc122017-03-16 09:02:40 +010061#include <linux/module.h>
62#include <linux/mtd/nand.h>
63#include <linux/of_address.h>
64#include <linux/of_irq.h>
65#include <linux/of_platform.h>
66#include <linux/iopoll.h>
67#include <linux/platform_device.h>
68#include <linux/platform_data/atmel.h>
69#include <linux/regmap.h>
70
71#include "pmecc.h"
72
73#define ATMEL_HSMC_NFC_CFG 0x0
74#define ATMEL_HSMC_NFC_CFG_SPARESIZE(x) (((x) / 4) << 24)
75#define ATMEL_HSMC_NFC_CFG_SPARESIZE_MASK GENMASK(30, 24)
76#define ATMEL_HSMC_NFC_CFG_DTO(cyc, mul) (((cyc) << 16) | ((mul) << 20))
77#define ATMEL_HSMC_NFC_CFG_DTO_MAX GENMASK(22, 16)
78#define ATMEL_HSMC_NFC_CFG_RBEDGE BIT(13)
79#define ATMEL_HSMC_NFC_CFG_FALLING_EDGE BIT(12)
80#define ATMEL_HSMC_NFC_CFG_RSPARE BIT(9)
81#define ATMEL_HSMC_NFC_CFG_WSPARE BIT(8)
82#define ATMEL_HSMC_NFC_CFG_PAGESIZE_MASK GENMASK(2, 0)
83#define ATMEL_HSMC_NFC_CFG_PAGESIZE(x) (fls((x) / 512) - 1)
84
85#define ATMEL_HSMC_NFC_CTRL 0x4
86#define ATMEL_HSMC_NFC_CTRL_EN BIT(0)
87#define ATMEL_HSMC_NFC_CTRL_DIS BIT(1)
88
89#define ATMEL_HSMC_NFC_SR 0x8
90#define ATMEL_HSMC_NFC_IER 0xc
91#define ATMEL_HSMC_NFC_IDR 0x10
92#define ATMEL_HSMC_NFC_IMR 0x14
93#define ATMEL_HSMC_NFC_SR_ENABLED BIT(1)
94#define ATMEL_HSMC_NFC_SR_RB_RISE BIT(4)
95#define ATMEL_HSMC_NFC_SR_RB_FALL BIT(5)
96#define ATMEL_HSMC_NFC_SR_BUSY BIT(8)
97#define ATMEL_HSMC_NFC_SR_WR BIT(11)
98#define ATMEL_HSMC_NFC_SR_CSID GENMASK(14, 12)
99#define ATMEL_HSMC_NFC_SR_XFRDONE BIT(16)
100#define ATMEL_HSMC_NFC_SR_CMDDONE BIT(17)
101#define ATMEL_HSMC_NFC_SR_DTOE BIT(20)
102#define ATMEL_HSMC_NFC_SR_UNDEF BIT(21)
103#define ATMEL_HSMC_NFC_SR_AWB BIT(22)
104#define ATMEL_HSMC_NFC_SR_NFCASE BIT(23)
105#define ATMEL_HSMC_NFC_SR_ERRORS (ATMEL_HSMC_NFC_SR_DTOE | \
106 ATMEL_HSMC_NFC_SR_UNDEF | \
107 ATMEL_HSMC_NFC_SR_AWB | \
108 ATMEL_HSMC_NFC_SR_NFCASE)
109#define ATMEL_HSMC_NFC_SR_RBEDGE(x) BIT((x) + 24)
110
111#define ATMEL_HSMC_NFC_ADDR 0x18
112#define ATMEL_HSMC_NFC_BANK 0x1c
113
114#define ATMEL_NFC_MAX_RB_ID 7
115
116#define ATMEL_NFC_SRAM_SIZE 0x2400
117
118#define ATMEL_NFC_CMD(pos, cmd) ((cmd) << (((pos) * 8) + 2))
119#define ATMEL_NFC_VCMD2 BIT(18)
120#define ATMEL_NFC_ACYCLE(naddrs) ((naddrs) << 19)
121#define ATMEL_NFC_CSID(cs) ((cs) << 22)
122#define ATMEL_NFC_DATAEN BIT(25)
123#define ATMEL_NFC_NFCWR BIT(26)
124
125#define ATMEL_NFC_MAX_ADDR_CYCLES 5
126
127#define ATMEL_NAND_ALE_OFFSET BIT(21)
128#define ATMEL_NAND_CLE_OFFSET BIT(22)
129
130#define DEFAULT_TIMEOUT_MS 1000
131#define MIN_DMA_LEN 128
132
133enum atmel_nand_rb_type {
134 ATMEL_NAND_NO_RB,
135 ATMEL_NAND_NATIVE_RB,
136 ATMEL_NAND_GPIO_RB,
137};
138
139struct atmel_nand_rb {
140 enum atmel_nand_rb_type type;
141 union {
142 struct gpio_desc *gpio;
143 int id;
144 };
145};
146
147struct atmel_nand_cs {
148 int id;
149 struct atmel_nand_rb rb;
150 struct gpio_desc *csgpio;
151 struct {
152 void __iomem *virt;
153 dma_addr_t dma;
154 } io;
Boris Brezillonf9ce2ed2017-03-16 09:35:59 +0100155
156 struct atmel_smc_cs_conf smcconf;
Boris Brezillonf88fc122017-03-16 09:02:40 +0100157};
158
159struct atmel_nand {
160 struct list_head node;
161 struct device *dev;
162 struct nand_chip base;
163 struct atmel_nand_cs *activecs;
164 struct atmel_pmecc_user *pmecc;
165 struct gpio_desc *cdgpio;
166 int numcs;
167 struct atmel_nand_cs cs[];
168};
169
170static inline struct atmel_nand *to_atmel_nand(struct nand_chip *chip)
171{
172 return container_of(chip, struct atmel_nand, base);
173}
174
175enum atmel_nfc_data_xfer {
176 ATMEL_NFC_NO_DATA,
177 ATMEL_NFC_READ_DATA,
178 ATMEL_NFC_WRITE_DATA,
179};
180
181struct atmel_nfc_op {
182 u8 cs;
183 u8 ncmds;
184 u8 cmds[2];
185 u8 naddrs;
186 u8 addrs[5];
187 enum atmel_nfc_data_xfer data;
188 u32 wait;
189 u32 errors;
190};
191
192struct atmel_nand_controller;
193struct atmel_nand_controller_caps;
194
195struct atmel_nand_controller_ops {
196 int (*probe)(struct platform_device *pdev,
197 const struct atmel_nand_controller_caps *caps);
198 int (*remove)(struct atmel_nand_controller *nc);
199 void (*nand_init)(struct atmel_nand_controller *nc,
200 struct atmel_nand *nand);
201 int (*ecc_init)(struct atmel_nand *nand);
Boris Brezillonf9ce2ed2017-03-16 09:35:59 +0100202 int (*setup_data_interface)(struct atmel_nand *nand, int csline,
203 const struct nand_data_interface *conf);
Boris Brezillonf88fc122017-03-16 09:02:40 +0100204};
205
206struct atmel_nand_controller_caps {
207 bool has_dma;
208 bool legacy_of_bindings;
209 u32 ale_offs;
210 u32 cle_offs;
211 const struct atmel_nand_controller_ops *ops;
212};
213
214struct atmel_nand_controller {
215 struct nand_hw_control base;
216 const struct atmel_nand_controller_caps *caps;
217 struct device *dev;
218 struct regmap *smc;
219 struct dma_chan *dmac;
220 struct atmel_pmecc *pmecc;
221 struct list_head chips;
222 struct clk *mck;
223};
224
225static inline struct atmel_nand_controller *
226to_nand_controller(struct nand_hw_control *ctl)
227{
228 return container_of(ctl, struct atmel_nand_controller, base);
229}
230
231struct atmel_smc_nand_controller {
232 struct atmel_nand_controller base;
233 struct regmap *matrix;
234 unsigned int ebi_csa_offs;
235};
236
237static inline struct atmel_smc_nand_controller *
238to_smc_nand_controller(struct nand_hw_control *ctl)
239{
240 return container_of(to_nand_controller(ctl),
241 struct atmel_smc_nand_controller, base);
242}
243
244struct atmel_hsmc_nand_controller {
245 struct atmel_nand_controller base;
246 struct {
247 struct gen_pool *pool;
248 void __iomem *virt;
249 dma_addr_t dma;
250 } sram;
251 struct regmap *io;
252 struct atmel_nfc_op op;
253 struct completion complete;
254 int irq;
255
256 /* Only used when instantiating from legacy DT bindings. */
257 struct clk *clk;
258};
259
260static inline struct atmel_hsmc_nand_controller *
261to_hsmc_nand_controller(struct nand_hw_control *ctl)
262{
263 return container_of(to_nand_controller(ctl),
264 struct atmel_hsmc_nand_controller, base);
265}
266
267static bool atmel_nfc_op_done(struct atmel_nfc_op *op, u32 status)
268{
269 op->errors |= status & ATMEL_HSMC_NFC_SR_ERRORS;
270 op->wait ^= status & op->wait;
271
272 return !op->wait || op->errors;
273}
274
275static irqreturn_t atmel_nfc_interrupt(int irq, void *data)
276{
277 struct atmel_hsmc_nand_controller *nc = data;
278 u32 sr, rcvd;
279 bool done;
280
281 regmap_read(nc->base.smc, ATMEL_HSMC_NFC_SR, &sr);
282
283 rcvd = sr & (nc->op.wait | ATMEL_HSMC_NFC_SR_ERRORS);
284 done = atmel_nfc_op_done(&nc->op, sr);
285
286 if (rcvd)
287 regmap_write(nc->base.smc, ATMEL_HSMC_NFC_IDR, rcvd);
288
289 if (done)
290 complete(&nc->complete);
291
292 return rcvd ? IRQ_HANDLED : IRQ_NONE;
293}
294
295static int atmel_nfc_wait(struct atmel_hsmc_nand_controller *nc, bool poll,
296 unsigned int timeout_ms)
297{
298 int ret;
299
300 if (!timeout_ms)
301 timeout_ms = DEFAULT_TIMEOUT_MS;
302
303 if (poll) {
304 u32 status;
305
306 ret = regmap_read_poll_timeout(nc->base.smc,
307 ATMEL_HSMC_NFC_SR, status,
308 atmel_nfc_op_done(&nc->op,
309 status),
310 0, timeout_ms * 1000);
311 } else {
312 init_completion(&nc->complete);
313 regmap_write(nc->base.smc, ATMEL_HSMC_NFC_IER,
314 nc->op.wait | ATMEL_HSMC_NFC_SR_ERRORS);
315 ret = wait_for_completion_timeout(&nc->complete,
316 msecs_to_jiffies(timeout_ms));
317 if (!ret)
318 ret = -ETIMEDOUT;
319 else
320 ret = 0;
321
322 regmap_write(nc->base.smc, ATMEL_HSMC_NFC_IDR, 0xffffffff);
323 }
324
325 if (nc->op.errors & ATMEL_HSMC_NFC_SR_DTOE) {
326 dev_err(nc->base.dev, "Waiting NAND R/B Timeout\n");
327 ret = -ETIMEDOUT;
328 }
329
330 if (nc->op.errors & ATMEL_HSMC_NFC_SR_UNDEF) {
331 dev_err(nc->base.dev, "Access to an undefined area\n");
332 ret = -EIO;
333 }
334
335 if (nc->op.errors & ATMEL_HSMC_NFC_SR_AWB) {
336 dev_err(nc->base.dev, "Access while busy\n");
337 ret = -EIO;
338 }
339
340 if (nc->op.errors & ATMEL_HSMC_NFC_SR_NFCASE) {
341 dev_err(nc->base.dev, "Wrong access size\n");
342 ret = -EIO;
343 }
344
345 return ret;
346}
347
348static void atmel_nand_dma_transfer_finished(void *data)
349{
350 struct completion *finished = data;
351
352 complete(finished);
353}
354
355static int atmel_nand_dma_transfer(struct atmel_nand_controller *nc,
356 void *buf, dma_addr_t dev_dma, size_t len,
357 enum dma_data_direction dir)
358{
359 DECLARE_COMPLETION_ONSTACK(finished);
360 dma_addr_t src_dma, dst_dma, buf_dma;
361 struct dma_async_tx_descriptor *tx;
362 dma_cookie_t cookie;
363
364 buf_dma = dma_map_single(nc->dev, buf, len, dir);
365 if (dma_mapping_error(nc->dev, dev_dma)) {
366 dev_err(nc->dev,
367 "Failed to prepare a buffer for DMA access\n");
368 goto err;
369 }
370
371 if (dir == DMA_FROM_DEVICE) {
372 src_dma = dev_dma;
373 dst_dma = buf_dma;
374 } else {
375 src_dma = buf_dma;
376 dst_dma = dev_dma;
377 }
378
379 tx = dmaengine_prep_dma_memcpy(nc->dmac, dst_dma, src_dma, len,
380 DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
381 if (!tx) {
382 dev_err(nc->dev, "Failed to prepare DMA memcpy\n");
383 goto err_unmap;
384 }
385
386 tx->callback = atmel_nand_dma_transfer_finished;
387 tx->callback_param = &finished;
388
389 cookie = dmaengine_submit(tx);
390 if (dma_submit_error(cookie)) {
391 dev_err(nc->dev, "Failed to do DMA tx_submit\n");
392 goto err_unmap;
393 }
394
395 dma_async_issue_pending(nc->dmac);
396 wait_for_completion(&finished);
397
398 return 0;
399
400err_unmap:
401 dma_unmap_single(nc->dev, buf_dma, len, dir);
402
403err:
404 dev_dbg(nc->dev, "Fall back to CPU I/O\n");
405
406 return -EIO;
407}
408
409static u8 atmel_nand_read_byte(struct mtd_info *mtd)
410{
411 struct nand_chip *chip = mtd_to_nand(mtd);
412 struct atmel_nand *nand = to_atmel_nand(chip);
413
414 return ioread8(nand->activecs->io.virt);
415}
416
417static u16 atmel_nand_read_word(struct mtd_info *mtd)
418{
419 struct nand_chip *chip = mtd_to_nand(mtd);
420 struct atmel_nand *nand = to_atmel_nand(chip);
421
422 return ioread16(nand->activecs->io.virt);
423}
424
425static void atmel_nand_write_byte(struct mtd_info *mtd, u8 byte)
426{
427 struct nand_chip *chip = mtd_to_nand(mtd);
428 struct atmel_nand *nand = to_atmel_nand(chip);
429
430 if (chip->options & NAND_BUSWIDTH_16)
431 iowrite16(byte | (byte << 8), nand->activecs->io.virt);
432 else
433 iowrite8(byte, nand->activecs->io.virt);
434}
435
436static void atmel_nand_read_buf(struct mtd_info *mtd, u8 *buf, int len)
437{
438 struct nand_chip *chip = mtd_to_nand(mtd);
439 struct atmel_nand *nand = to_atmel_nand(chip);
440 struct atmel_nand_controller *nc;
441
442 nc = to_nand_controller(chip->controller);
443
444 /*
445 * If the controller supports DMA, the buffer address is DMA-able and
446 * len is long enough to make DMA transfers profitable, let's trigger
447 * a DMA transfer. If it fails, fallback to PIO mode.
448 */
449 if (nc->dmac && virt_addr_valid(buf) &&
450 len >= MIN_DMA_LEN &&
451 !atmel_nand_dma_transfer(nc, buf, nand->activecs->io.dma, len,
452 DMA_FROM_DEVICE))
453 return;
454
455 if (chip->options & NAND_BUSWIDTH_16)
456 ioread16_rep(nand->activecs->io.virt, buf, len / 2);
457 else
458 ioread8_rep(nand->activecs->io.virt, buf, len);
459}
460
461static void atmel_nand_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
462{
463 struct nand_chip *chip = mtd_to_nand(mtd);
464 struct atmel_nand *nand = to_atmel_nand(chip);
465 struct atmel_nand_controller *nc;
466
467 nc = to_nand_controller(chip->controller);
468
469 /*
470 * If the controller supports DMA, the buffer address is DMA-able and
471 * len is long enough to make DMA transfers profitable, let's trigger
472 * a DMA transfer. If it fails, fallback to PIO mode.
473 */
474 if (nc->dmac && virt_addr_valid(buf) &&
475 len >= MIN_DMA_LEN &&
476 !atmel_nand_dma_transfer(nc, (void *)buf, nand->activecs->io.dma,
477 len, DMA_TO_DEVICE))
478 return;
479
480 if (chip->options & NAND_BUSWIDTH_16)
481 iowrite16_rep(nand->activecs->io.virt, buf, len / 2);
482 else
483 iowrite8_rep(nand->activecs->io.virt, buf, len);
484}
485
486static int atmel_nand_dev_ready(struct mtd_info *mtd)
487{
488 struct nand_chip *chip = mtd_to_nand(mtd);
489 struct atmel_nand *nand = to_atmel_nand(chip);
490
491 return gpiod_get_value(nand->activecs->rb.gpio);
492}
493
494static void atmel_nand_select_chip(struct mtd_info *mtd, int cs)
495{
496 struct nand_chip *chip = mtd_to_nand(mtd);
497 struct atmel_nand *nand = to_atmel_nand(chip);
498
499 if (cs < 0 || cs >= nand->numcs) {
500 nand->activecs = NULL;
501 chip->dev_ready = NULL;
502 return;
503 }
504
505 nand->activecs = &nand->cs[cs];
506
507 if (nand->activecs->rb.type == ATMEL_NAND_GPIO_RB)
508 chip->dev_ready = atmel_nand_dev_ready;
509}
510
511static int atmel_hsmc_nand_dev_ready(struct mtd_info *mtd)
512{
513 struct nand_chip *chip = mtd_to_nand(mtd);
514 struct atmel_nand *nand = to_atmel_nand(chip);
515 struct atmel_hsmc_nand_controller *nc;
516 u32 status;
517
518 nc = to_hsmc_nand_controller(chip->controller);
519
520 regmap_read(nc->base.smc, ATMEL_HSMC_NFC_SR, &status);
521
522 return status & ATMEL_HSMC_NFC_SR_RBEDGE(nand->activecs->rb.id);
523}
524
525static void atmel_hsmc_nand_select_chip(struct mtd_info *mtd, int cs)
526{
527 struct nand_chip *chip = mtd_to_nand(mtd);
528 struct atmel_nand *nand = to_atmel_nand(chip);
529 struct atmel_hsmc_nand_controller *nc;
530
531 nc = to_hsmc_nand_controller(chip->controller);
532
533 atmel_nand_select_chip(mtd, cs);
534
535 if (!nand->activecs) {
536 regmap_write(nc->base.smc, ATMEL_HSMC_NFC_CTRL,
537 ATMEL_HSMC_NFC_CTRL_DIS);
538 return;
539 }
540
541 if (nand->activecs->rb.type == ATMEL_NAND_NATIVE_RB)
542 chip->dev_ready = atmel_hsmc_nand_dev_ready;
543
544 regmap_update_bits(nc->base.smc, ATMEL_HSMC_NFC_CFG,
545 ATMEL_HSMC_NFC_CFG_PAGESIZE_MASK |
546 ATMEL_HSMC_NFC_CFG_SPARESIZE_MASK |
547 ATMEL_HSMC_NFC_CFG_RSPARE |
548 ATMEL_HSMC_NFC_CFG_WSPARE,
549 ATMEL_HSMC_NFC_CFG_PAGESIZE(mtd->writesize) |
550 ATMEL_HSMC_NFC_CFG_SPARESIZE(mtd->oobsize) |
551 ATMEL_HSMC_NFC_CFG_RSPARE);
552 regmap_write(nc->base.smc, ATMEL_HSMC_NFC_CTRL,
553 ATMEL_HSMC_NFC_CTRL_EN);
554}
555
556static int atmel_nfc_exec_op(struct atmel_hsmc_nand_controller *nc, bool poll)
557{
558 u8 *addrs = nc->op.addrs;
559 unsigned int op = 0;
560 u32 addr, val;
561 int i, ret;
562
563 nc->op.wait = ATMEL_HSMC_NFC_SR_CMDDONE;
564
565 for (i = 0; i < nc->op.ncmds; i++)
566 op |= ATMEL_NFC_CMD(i, nc->op.cmds[i]);
567
568 if (nc->op.naddrs == ATMEL_NFC_MAX_ADDR_CYCLES)
569 regmap_write(nc->base.smc, ATMEL_HSMC_NFC_ADDR, *addrs++);
570
571 op |= ATMEL_NFC_CSID(nc->op.cs) |
572 ATMEL_NFC_ACYCLE(nc->op.naddrs);
573
574 if (nc->op.ncmds > 1)
575 op |= ATMEL_NFC_VCMD2;
576
577 addr = addrs[0] | (addrs[1] << 8) | (addrs[2] << 16) |
578 (addrs[3] << 24);
579
580 if (nc->op.data != ATMEL_NFC_NO_DATA) {
581 op |= ATMEL_NFC_DATAEN;
582 nc->op.wait |= ATMEL_HSMC_NFC_SR_XFRDONE;
583
584 if (nc->op.data == ATMEL_NFC_WRITE_DATA)
585 op |= ATMEL_NFC_NFCWR;
586 }
587
588 /* Clear all flags. */
589 regmap_read(nc->base.smc, ATMEL_HSMC_NFC_SR, &val);
590
591 /* Send the command. */
592 regmap_write(nc->io, op, addr);
593
594 ret = atmel_nfc_wait(nc, poll, 0);
595 if (ret)
596 dev_err(nc->base.dev,
597 "Failed to send NAND command (err = %d)!",
598 ret);
599
600 /* Reset the op state. */
601 memset(&nc->op, 0, sizeof(nc->op));
602
603 return ret;
604}
605
606static void atmel_hsmc_nand_cmd_ctrl(struct mtd_info *mtd, int dat,
607 unsigned int ctrl)
608{
609 struct nand_chip *chip = mtd_to_nand(mtd);
610 struct atmel_nand *nand = to_atmel_nand(chip);
611 struct atmel_hsmc_nand_controller *nc;
612
613 nc = to_hsmc_nand_controller(chip->controller);
614
615 if (ctrl & NAND_ALE) {
616 if (nc->op.naddrs == ATMEL_NFC_MAX_ADDR_CYCLES)
617 return;
618
619 nc->op.addrs[nc->op.naddrs++] = dat;
620 } else if (ctrl & NAND_CLE) {
621 if (nc->op.ncmds > 1)
622 return;
623
624 nc->op.cmds[nc->op.ncmds++] = dat;
625 }
626
627 if (dat == NAND_CMD_NONE) {
628 nc->op.cs = nand->activecs->id;
629 atmel_nfc_exec_op(nc, true);
630 }
631}
632
633static void atmel_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
634 unsigned int ctrl)
635{
636 struct nand_chip *chip = mtd_to_nand(mtd);
637 struct atmel_nand *nand = to_atmel_nand(chip);
638 struct atmel_nand_controller *nc;
639
640 nc = to_nand_controller(chip->controller);
641
642 if ((ctrl & NAND_CTRL_CHANGE) && nand->activecs->csgpio) {
643 if (ctrl & NAND_NCE)
644 gpiod_set_value(nand->activecs->csgpio, 0);
645 else
646 gpiod_set_value(nand->activecs->csgpio, 1);
647 }
648
649 if (ctrl & NAND_ALE)
650 writeb(cmd, nand->activecs->io.virt + nc->caps->ale_offs);
651 else if (ctrl & NAND_CLE)
652 writeb(cmd, nand->activecs->io.virt + nc->caps->cle_offs);
653}
654
655static void atmel_nfc_copy_to_sram(struct nand_chip *chip, const u8 *buf,
656 bool oob_required)
657{
658 struct mtd_info *mtd = nand_to_mtd(chip);
659 struct atmel_hsmc_nand_controller *nc;
660 int ret = -EIO;
661
662 nc = to_hsmc_nand_controller(chip->controller);
663
664 if (nc->base.dmac)
665 ret = atmel_nand_dma_transfer(&nc->base, (void *)buf,
666 nc->sram.dma, mtd->writesize,
667 DMA_TO_DEVICE);
668
669 /* Falling back to CPU copy. */
670 if (ret)
671 memcpy_toio(nc->sram.virt, buf, mtd->writesize);
672
673 if (oob_required)
674 memcpy_toio(nc->sram.virt + mtd->writesize, chip->oob_poi,
675 mtd->oobsize);
676}
677
678static void atmel_nfc_copy_from_sram(struct nand_chip *chip, u8 *buf,
679 bool oob_required)
680{
681 struct mtd_info *mtd = nand_to_mtd(chip);
682 struct atmel_hsmc_nand_controller *nc;
683 int ret = -EIO;
684
685 nc = to_hsmc_nand_controller(chip->controller);
686
687 if (nc->base.dmac)
688 ret = atmel_nand_dma_transfer(&nc->base, buf, nc->sram.dma,
689 mtd->writesize, DMA_FROM_DEVICE);
690
691 /* Falling back to CPU copy. */
692 if (ret)
693 memcpy_fromio(buf, nc->sram.virt, mtd->writesize);
694
695 if (oob_required)
696 memcpy_fromio(chip->oob_poi, nc->sram.virt + mtd->writesize,
697 mtd->oobsize);
698}
699
700static void atmel_nfc_set_op_addr(struct nand_chip *chip, int page, int column)
701{
702 struct mtd_info *mtd = nand_to_mtd(chip);
703 struct atmel_hsmc_nand_controller *nc;
704
705 nc = to_hsmc_nand_controller(chip->controller);
706
707 if (column >= 0) {
708 nc->op.addrs[nc->op.naddrs++] = column;
709
710 /*
711 * 2 address cycles for the column offset on large page NANDs.
712 */
713 if (mtd->writesize > 512)
714 nc->op.addrs[nc->op.naddrs++] = column >> 8;
715 }
716
717 if (page >= 0) {
718 nc->op.addrs[nc->op.naddrs++] = page;
719 nc->op.addrs[nc->op.naddrs++] = page >> 8;
720
721 if ((mtd->writesize > 512 && chip->chipsize > SZ_128M) ||
722 (mtd->writesize <= 512 && chip->chipsize > SZ_32M))
723 nc->op.addrs[nc->op.naddrs++] = page >> 16;
724 }
725}
726
727static int atmel_nand_pmecc_enable(struct nand_chip *chip, int op, bool raw)
728{
729 struct atmel_nand *nand = to_atmel_nand(chip);
730 struct atmel_nand_controller *nc;
731 int ret;
732
733 nc = to_nand_controller(chip->controller);
734
735 if (raw)
736 return 0;
737
738 ret = atmel_pmecc_enable(nand->pmecc, op);
739 if (ret)
740 dev_err(nc->dev,
741 "Failed to enable ECC engine (err = %d)\n", ret);
742
743 return ret;
744}
745
746static void atmel_nand_pmecc_disable(struct nand_chip *chip, bool raw)
747{
748 struct atmel_nand *nand = to_atmel_nand(chip);
749
750 if (!raw)
751 atmel_pmecc_disable(nand->pmecc);
752}
753
754static int atmel_nand_pmecc_generate_eccbytes(struct nand_chip *chip, bool raw)
755{
756 struct atmel_nand *nand = to_atmel_nand(chip);
757 struct mtd_info *mtd = nand_to_mtd(chip);
758 struct atmel_nand_controller *nc;
759 struct mtd_oob_region oobregion;
760 void *eccbuf;
761 int ret, i;
762
763 nc = to_nand_controller(chip->controller);
764
765 if (raw)
766 return 0;
767
768 ret = atmel_pmecc_wait_rdy(nand->pmecc);
769 if (ret) {
770 dev_err(nc->dev,
771 "Failed to transfer NAND page data (err = %d)\n",
772 ret);
773 return ret;
774 }
775
776 mtd_ooblayout_ecc(mtd, 0, &oobregion);
777 eccbuf = chip->oob_poi + oobregion.offset;
778
779 for (i = 0; i < chip->ecc.steps; i++) {
780 atmel_pmecc_get_generated_eccbytes(nand->pmecc, i,
781 eccbuf);
782 eccbuf += chip->ecc.bytes;
783 }
784
785 return 0;
786}
787
788static int atmel_nand_pmecc_correct_data(struct nand_chip *chip, void *buf,
789 bool raw)
790{
791 struct atmel_nand *nand = to_atmel_nand(chip);
792 struct mtd_info *mtd = nand_to_mtd(chip);
793 struct atmel_nand_controller *nc;
794 struct mtd_oob_region oobregion;
795 int ret, i, max_bitflips = 0;
796 void *databuf, *eccbuf;
797
798 nc = to_nand_controller(chip->controller);
799
800 if (raw)
801 return 0;
802
803 ret = atmel_pmecc_wait_rdy(nand->pmecc);
804 if (ret) {
805 dev_err(nc->dev,
806 "Failed to read NAND page data (err = %d)\n",
807 ret);
808 return ret;
809 }
810
811 mtd_ooblayout_ecc(mtd, 0, &oobregion);
812 eccbuf = chip->oob_poi + oobregion.offset;
813 databuf = buf;
814
815 for (i = 0; i < chip->ecc.steps; i++) {
816 ret = atmel_pmecc_correct_sector(nand->pmecc, i, databuf,
817 eccbuf);
818 if (ret < 0 && !atmel_pmecc_correct_erased_chunks(nand->pmecc))
819 ret = nand_check_erased_ecc_chunk(databuf,
820 chip->ecc.size,
821 eccbuf,
822 chip->ecc.bytes,
823 NULL, 0,
824 chip->ecc.strength);
825
826 if (ret >= 0)
827 max_bitflips = max(ret, max_bitflips);
828 else
829 mtd->ecc_stats.failed++;
830
831 databuf += chip->ecc.size;
832 eccbuf += chip->ecc.bytes;
833 }
834
835 return max_bitflips;
836}
837
838static int atmel_nand_pmecc_write_pg(struct nand_chip *chip, const u8 *buf,
839 bool oob_required, int page, bool raw)
840{
841 struct mtd_info *mtd = nand_to_mtd(chip);
842 struct atmel_nand *nand = to_atmel_nand(chip);
843 int ret;
844
845 ret = atmel_nand_pmecc_enable(chip, NAND_ECC_WRITE, raw);
846 if (ret)
847 return ret;
848
849 atmel_nand_write_buf(mtd, buf, mtd->writesize);
850
851 ret = atmel_nand_pmecc_generate_eccbytes(chip, raw);
852 if (ret) {
853 atmel_pmecc_disable(nand->pmecc);
854 return ret;
855 }
856
857 atmel_nand_pmecc_disable(chip, raw);
858
859 atmel_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
860
861 return 0;
862}
863
864static int atmel_nand_pmecc_write_page(struct mtd_info *mtd,
865 struct nand_chip *chip, const u8 *buf,
866 int oob_required, int page)
867{
868 return atmel_nand_pmecc_write_pg(chip, buf, oob_required, page, false);
869}
870
871static int atmel_nand_pmecc_write_page_raw(struct mtd_info *mtd,
872 struct nand_chip *chip,
873 const u8 *buf, int oob_required,
874 int page)
875{
876 return atmel_nand_pmecc_write_pg(chip, buf, oob_required, page, true);
877}
878
879static int atmel_nand_pmecc_read_pg(struct nand_chip *chip, u8 *buf,
880 bool oob_required, int page, bool raw)
881{
882 struct mtd_info *mtd = nand_to_mtd(chip);
883 int ret;
884
885 ret = atmel_nand_pmecc_enable(chip, NAND_ECC_READ, raw);
886 if (ret)
887 return ret;
888
889 atmel_nand_read_buf(mtd, buf, mtd->writesize);
890 atmel_nand_read_buf(mtd, chip->oob_poi, mtd->oobsize);
891
892 ret = atmel_nand_pmecc_correct_data(chip, buf, raw);
893
894 atmel_nand_pmecc_disable(chip, raw);
895
896 return ret;
897}
898
899static int atmel_nand_pmecc_read_page(struct mtd_info *mtd,
900 struct nand_chip *chip, u8 *buf,
901 int oob_required, int page)
902{
903 return atmel_nand_pmecc_read_pg(chip, buf, oob_required, page, false);
904}
905
906static int atmel_nand_pmecc_read_page_raw(struct mtd_info *mtd,
907 struct nand_chip *chip, u8 *buf,
908 int oob_required, int page)
909{
910 return atmel_nand_pmecc_read_pg(chip, buf, oob_required, page, true);
911}
912
913static int atmel_hsmc_nand_pmecc_write_pg(struct nand_chip *chip,
914 const u8 *buf, bool oob_required,
915 int page, bool raw)
916{
917 struct mtd_info *mtd = nand_to_mtd(chip);
918 struct atmel_nand *nand = to_atmel_nand(chip);
919 struct atmel_hsmc_nand_controller *nc;
Boris Brezillon41145642017-05-16 18:27:49 +0200920 int ret, status;
Boris Brezillonf88fc122017-03-16 09:02:40 +0100921
922 nc = to_hsmc_nand_controller(chip->controller);
923
924 atmel_nfc_copy_to_sram(chip, buf, false);
925
926 nc->op.cmds[0] = NAND_CMD_SEQIN;
927 nc->op.ncmds = 1;
928 atmel_nfc_set_op_addr(chip, page, 0x0);
929 nc->op.cs = nand->activecs->id;
930 nc->op.data = ATMEL_NFC_WRITE_DATA;
931
932 ret = atmel_nand_pmecc_enable(chip, NAND_ECC_WRITE, raw);
933 if (ret)
934 return ret;
935
936 ret = atmel_nfc_exec_op(nc, false);
937 if (ret) {
938 atmel_nand_pmecc_disable(chip, raw);
939 dev_err(nc->base.dev,
940 "Failed to transfer NAND page data (err = %d)\n",
941 ret);
942 return ret;
943 }
944
945 ret = atmel_nand_pmecc_generate_eccbytes(chip, raw);
946
947 atmel_nand_pmecc_disable(chip, raw);
948
949 if (ret)
950 return ret;
951
952 atmel_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
953
954 nc->op.cmds[0] = NAND_CMD_PAGEPROG;
955 nc->op.ncmds = 1;
956 nc->op.cs = nand->activecs->id;
957 ret = atmel_nfc_exec_op(nc, false);
958 if (ret)
959 dev_err(nc->base.dev, "Failed to program NAND page (err = %d)\n",
960 ret);
961
Boris Brezillon41145642017-05-16 18:27:49 +0200962 status = chip->waitfunc(mtd, chip);
963 if (status & NAND_STATUS_FAIL)
964 return -EIO;
965
Boris Brezillonf88fc122017-03-16 09:02:40 +0100966 return ret;
967}
968
969static int atmel_hsmc_nand_pmecc_write_page(struct mtd_info *mtd,
970 struct nand_chip *chip,
971 const u8 *buf, int oob_required,
972 int page)
973{
974 return atmel_hsmc_nand_pmecc_write_pg(chip, buf, oob_required, page,
975 false);
976}
977
978static int atmel_hsmc_nand_pmecc_write_page_raw(struct mtd_info *mtd,
979 struct nand_chip *chip,
980 const u8 *buf,
981 int oob_required, int page)
982{
983 return atmel_hsmc_nand_pmecc_write_pg(chip, buf, oob_required, page,
984 true);
985}
986
987static int atmel_hsmc_nand_pmecc_read_pg(struct nand_chip *chip, u8 *buf,
988 bool oob_required, int page,
989 bool raw)
990{
991 struct mtd_info *mtd = nand_to_mtd(chip);
992 struct atmel_nand *nand = to_atmel_nand(chip);
993 struct atmel_hsmc_nand_controller *nc;
994 int ret;
995
996 nc = to_hsmc_nand_controller(chip->controller);
997
998 /*
999 * Optimized read page accessors only work when the NAND R/B pin is
1000 * connected to a native SoC R/B pin. If that's not the case, fallback
1001 * to the non-optimized one.
1002 */
1003 if (nand->activecs->rb.type != ATMEL_NAND_NATIVE_RB) {
1004 chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
1005
1006 return atmel_nand_pmecc_read_pg(chip, buf, oob_required, page,
1007 raw);
1008 }
1009
1010 nc->op.cmds[nc->op.ncmds++] = NAND_CMD_READ0;
1011
1012 if (mtd->writesize > 512)
1013 nc->op.cmds[nc->op.ncmds++] = NAND_CMD_READSTART;
1014
1015 atmel_nfc_set_op_addr(chip, page, 0x0);
1016 nc->op.cs = nand->activecs->id;
1017 nc->op.data = ATMEL_NFC_READ_DATA;
1018
1019 ret = atmel_nand_pmecc_enable(chip, NAND_ECC_READ, raw);
1020 if (ret)
1021 return ret;
1022
1023 ret = atmel_nfc_exec_op(nc, false);
1024 if (ret) {
1025 atmel_nand_pmecc_disable(chip, raw);
1026 dev_err(nc->base.dev,
1027 "Failed to load NAND page data (err = %d)\n",
1028 ret);
1029 return ret;
1030 }
1031
1032 atmel_nfc_copy_from_sram(chip, buf, true);
1033
1034 ret = atmel_nand_pmecc_correct_data(chip, buf, raw);
1035
1036 atmel_nand_pmecc_disable(chip, raw);
1037
1038 return ret;
1039}
1040
1041static int atmel_hsmc_nand_pmecc_read_page(struct mtd_info *mtd,
1042 struct nand_chip *chip, u8 *buf,
1043 int oob_required, int page)
1044{
1045 return atmel_hsmc_nand_pmecc_read_pg(chip, buf, oob_required, page,
1046 false);
1047}
1048
1049static int atmel_hsmc_nand_pmecc_read_page_raw(struct mtd_info *mtd,
1050 struct nand_chip *chip,
1051 u8 *buf, int oob_required,
1052 int page)
1053{
1054 return atmel_hsmc_nand_pmecc_read_pg(chip, buf, oob_required, page,
1055 true);
1056}
1057
1058static int atmel_nand_pmecc_init(struct nand_chip *chip)
1059{
1060 struct mtd_info *mtd = nand_to_mtd(chip);
1061 struct atmel_nand *nand = to_atmel_nand(chip);
1062 struct atmel_nand_controller *nc;
1063 struct atmel_pmecc_user_req req;
1064
1065 nc = to_nand_controller(chip->controller);
1066
1067 if (!nc->pmecc) {
1068 dev_err(nc->dev, "HW ECC not supported\n");
1069 return -ENOTSUPP;
1070 }
1071
1072 if (nc->caps->legacy_of_bindings) {
1073 u32 val;
1074
1075 if (!of_property_read_u32(nc->dev->of_node, "atmel,pmecc-cap",
1076 &val))
1077 chip->ecc.strength = val;
1078
1079 if (!of_property_read_u32(nc->dev->of_node,
1080 "atmel,pmecc-sector-size",
1081 &val))
1082 chip->ecc.size = val;
1083 }
1084
1085 if (chip->ecc.options & NAND_ECC_MAXIMIZE)
1086 req.ecc.strength = ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH;
1087 else if (chip->ecc.strength)
1088 req.ecc.strength = chip->ecc.strength;
1089 else if (chip->ecc_strength_ds)
1090 req.ecc.strength = chip->ecc_strength_ds;
1091 else
1092 req.ecc.strength = ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH;
1093
1094 if (chip->ecc.size)
1095 req.ecc.sectorsize = chip->ecc.size;
1096 else if (chip->ecc_step_ds)
1097 req.ecc.sectorsize = chip->ecc_step_ds;
1098 else
1099 req.ecc.sectorsize = ATMEL_PMECC_SECTOR_SIZE_AUTO;
1100
1101 req.pagesize = mtd->writesize;
1102 req.oobsize = mtd->oobsize;
1103
1104 if (mtd->writesize <= 512) {
1105 req.ecc.bytes = 4;
1106 req.ecc.ooboffset = 0;
1107 } else {
1108 req.ecc.bytes = mtd->oobsize - 2;
1109 req.ecc.ooboffset = ATMEL_PMECC_OOBOFFSET_AUTO;
1110 }
1111
1112 nand->pmecc = atmel_pmecc_create_user(nc->pmecc, &req);
1113 if (IS_ERR(nand->pmecc))
1114 return PTR_ERR(nand->pmecc);
1115
1116 chip->ecc.algo = NAND_ECC_BCH;
1117 chip->ecc.size = req.ecc.sectorsize;
1118 chip->ecc.bytes = req.ecc.bytes / req.ecc.nsectors;
1119 chip->ecc.strength = req.ecc.strength;
1120
1121 chip->options |= NAND_NO_SUBPAGE_WRITE;
1122
1123 mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
1124
1125 return 0;
1126}
1127
1128static int atmel_nand_ecc_init(struct atmel_nand *nand)
1129{
1130 struct nand_chip *chip = &nand->base;
1131 struct atmel_nand_controller *nc;
1132 int ret;
1133
1134 nc = to_nand_controller(chip->controller);
1135
1136 switch (chip->ecc.mode) {
1137 case NAND_ECC_NONE:
1138 case NAND_ECC_SOFT:
1139 /*
1140 * Nothing to do, the core will initialize everything for us.
1141 */
1142 break;
1143
1144 case NAND_ECC_HW:
1145 ret = atmel_nand_pmecc_init(chip);
1146 if (ret)
1147 return ret;
1148
1149 chip->ecc.read_page = atmel_nand_pmecc_read_page;
1150 chip->ecc.write_page = atmel_nand_pmecc_write_page;
1151 chip->ecc.read_page_raw = atmel_nand_pmecc_read_page_raw;
1152 chip->ecc.write_page_raw = atmel_nand_pmecc_write_page_raw;
1153 break;
1154
1155 default:
1156 /* Other modes are not supported. */
1157 dev_err(nc->dev, "Unsupported ECC mode: %d\n",
1158 chip->ecc.mode);
1159 return -ENOTSUPP;
1160 }
1161
1162 return 0;
1163}
1164
1165static int atmel_hsmc_nand_ecc_init(struct atmel_nand *nand)
1166{
1167 struct nand_chip *chip = &nand->base;
1168 int ret;
1169
1170 ret = atmel_nand_ecc_init(nand);
1171 if (ret)
1172 return ret;
1173
1174 if (chip->ecc.mode != NAND_ECC_HW)
1175 return 0;
1176
1177 /* Adjust the ECC operations for the HSMC IP. */
1178 chip->ecc.read_page = atmel_hsmc_nand_pmecc_read_page;
1179 chip->ecc.write_page = atmel_hsmc_nand_pmecc_write_page;
1180 chip->ecc.read_page_raw = atmel_hsmc_nand_pmecc_read_page_raw;
1181 chip->ecc.write_page_raw = atmel_hsmc_nand_pmecc_write_page_raw;
1182 chip->ecc.options |= NAND_ECC_CUSTOM_PAGE_ACCESS;
1183
1184 return 0;
1185}
1186
Boris Brezillonf9ce2ed2017-03-16 09:35:59 +01001187static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand,
1188 const struct nand_data_interface *conf,
1189 struct atmel_smc_cs_conf *smcconf)
1190{
1191 u32 ncycles, totalcycles, timeps, mckperiodps;
1192 struct atmel_nand_controller *nc;
1193 int ret;
1194
1195 nc = to_nand_controller(nand->base.controller);
1196
1197 /* DDR interface not supported. */
1198 if (conf->type != NAND_SDR_IFACE)
1199 return -ENOTSUPP;
1200
1201 /*
1202 * tRC < 30ns implies EDO mode. This controller does not support this
1203 * mode.
1204 */
1205 if (conf->timings.sdr.tRC_min < 30)
1206 return -ENOTSUPP;
1207
1208 atmel_smc_cs_conf_init(smcconf);
1209
1210 mckperiodps = NSEC_PER_SEC / clk_get_rate(nc->mck);
1211 mckperiodps *= 1000;
1212
1213 /*
1214 * Set write pulse timing. This one is easy to extract:
1215 *
1216 * NWE_PULSE = tWP
1217 */
1218 ncycles = DIV_ROUND_UP(conf->timings.sdr.tWP_min, mckperiodps);
1219 totalcycles = ncycles;
1220 ret = atmel_smc_cs_conf_set_pulse(smcconf, ATMEL_SMC_NWE_SHIFT,
1221 ncycles);
1222 if (ret)
1223 return ret;
1224
1225 /*
1226 * The write setup timing depends on the operation done on the NAND.
1227 * All operations goes through the same data bus, but the operation
1228 * type depends on the address we are writing to (ALE/CLE address
1229 * lines).
1230 * Since we have no way to differentiate the different operations at
1231 * the SMC level, we must consider the worst case (the biggest setup
1232 * time among all operation types):
1233 *
1234 * NWE_SETUP = max(tCLS, tCS, tALS, tDS) - NWE_PULSE
1235 */
1236 timeps = max3(conf->timings.sdr.tCLS_min, conf->timings.sdr.tCS_min,
1237 conf->timings.sdr.tALS_min);
1238 timeps = max(timeps, conf->timings.sdr.tDS_min);
1239 ncycles = DIV_ROUND_UP(timeps, mckperiodps);
1240 ncycles = ncycles > totalcycles ? ncycles - totalcycles : 0;
1241 totalcycles += ncycles;
1242 ret = atmel_smc_cs_conf_set_setup(smcconf, ATMEL_SMC_NWE_SHIFT,
1243 ncycles);
1244 if (ret)
1245 return ret;
1246
1247 /*
1248 * As for the write setup timing, the write hold timing depends on the
1249 * operation done on the NAND:
1250 *
1251 * NWE_HOLD = max(tCLH, tCH, tALH, tDH, tWH)
1252 */
1253 timeps = max3(conf->timings.sdr.tCLH_min, conf->timings.sdr.tCH_min,
1254 conf->timings.sdr.tALH_min);
1255 timeps = max3(timeps, conf->timings.sdr.tDH_min,
1256 conf->timings.sdr.tWH_min);
1257 ncycles = DIV_ROUND_UP(timeps, mckperiodps);
1258 totalcycles += ncycles;
1259
1260 /*
1261 * The write cycle timing is directly matching tWC, but is also
1262 * dependent on the other timings on the setup and hold timings we
1263 * calculated earlier, which gives:
1264 *
1265 * NWE_CYCLE = max(tWC, NWE_SETUP + NWE_PULSE + NWE_HOLD)
1266 */
1267 ncycles = DIV_ROUND_UP(conf->timings.sdr.tWC_min, mckperiodps);
1268 ncycles = max(totalcycles, ncycles);
1269 ret = atmel_smc_cs_conf_set_cycle(smcconf, ATMEL_SMC_NWE_SHIFT,
1270 ncycles);
1271 if (ret)
1272 return ret;
1273
1274 /*
1275 * We don't want the CS line to be toggled between each byte/word
1276 * transfer to the NAND. The only way to guarantee that is to have the
1277 * NCS_{WR,RD}_{SETUP,HOLD} timings set to 0, which in turn means:
1278 *
1279 * NCS_WR_PULSE = NWE_CYCLE
1280 */
1281 ret = atmel_smc_cs_conf_set_pulse(smcconf, ATMEL_SMC_NCS_WR_SHIFT,
1282 ncycles);
1283 if (ret)
1284 return ret;
1285
1286 /*
1287 * As for the write setup timing, the read hold timing depends on the
1288 * operation done on the NAND:
1289 *
1290 * NRD_HOLD = max(tREH, tRHOH)
1291 */
1292 timeps = max(conf->timings.sdr.tREH_min, conf->timings.sdr.tRHOH_min);
1293 ncycles = DIV_ROUND_UP(timeps, mckperiodps);
1294 totalcycles = ncycles;
1295
1296 /*
1297 * TDF = tRHZ - NRD_HOLD
1298 */
1299 ncycles = DIV_ROUND_UP(conf->timings.sdr.tRHZ_max, mckperiodps);
1300 ncycles -= totalcycles;
1301
1302 /*
1303 * In ONFI 4.0 specs, tRHZ has been increased to support EDO NANDs and
1304 * we might end up with a config that does not fit in the TDF field.
1305 * Just take the max value in this case and hope that the NAND is more
1306 * tolerant than advertised.
1307 */
1308 if (ncycles > ATMEL_SMC_MODE_TDF_MAX)
1309 ncycles = ATMEL_SMC_MODE_TDF_MAX;
1310 else if (ncycles < ATMEL_SMC_MODE_TDF_MIN)
1311 ncycles = ATMEL_SMC_MODE_TDF_MIN;
1312
1313 smcconf->mode |= ATMEL_SMC_MODE_TDF(ncycles) |
1314 ATMEL_SMC_MODE_TDFMODE_OPTIMIZED;
1315
1316 /*
1317 * Read pulse timing directly matches tRP:
1318 *
1319 * NRD_PULSE = tRP
1320 */
1321 ncycles = DIV_ROUND_UP(conf->timings.sdr.tRP_min, mckperiodps);
1322 totalcycles += ncycles;
1323 ret = atmel_smc_cs_conf_set_pulse(smcconf, ATMEL_SMC_NRD_SHIFT,
1324 ncycles);
1325 if (ret)
1326 return ret;
1327
1328 /*
1329 * The write cycle timing is directly matching tWC, but is also
1330 * dependent on the setup and hold timings we calculated earlier,
1331 * which gives:
1332 *
1333 * NRD_CYCLE = max(tRC, NRD_PULSE + NRD_HOLD)
1334 *
1335 * NRD_SETUP is always 0.
1336 */
1337 ncycles = DIV_ROUND_UP(conf->timings.sdr.tRC_min, mckperiodps);
1338 ncycles = max(totalcycles, ncycles);
1339 ret = atmel_smc_cs_conf_set_cycle(smcconf, ATMEL_SMC_NRD_SHIFT,
1340 ncycles);
1341 if (ret)
1342 return ret;
1343
1344 /*
1345 * We don't want the CS line to be toggled between each byte/word
1346 * transfer from the NAND. The only way to guarantee that is to have
1347 * the NCS_{WR,RD}_{SETUP,HOLD} timings set to 0, which in turn means:
1348 *
1349 * NCS_RD_PULSE = NRD_CYCLE
1350 */
1351 ret = atmel_smc_cs_conf_set_pulse(smcconf, ATMEL_SMC_NCS_RD_SHIFT,
1352 ncycles);
1353 if (ret)
1354 return ret;
1355
1356 /* Txxx timings are directly matching tXXX ones. */
1357 ncycles = DIV_ROUND_UP(conf->timings.sdr.tCLR_min, mckperiodps);
1358 ret = atmel_smc_cs_conf_set_timing(smcconf,
1359 ATMEL_HSMC_TIMINGS_TCLR_SHIFT,
1360 ncycles);
1361 if (ret)
1362 return ret;
1363
1364 ncycles = DIV_ROUND_UP(conf->timings.sdr.tADL_min, mckperiodps);
1365 ret = atmel_smc_cs_conf_set_timing(smcconf,
1366 ATMEL_HSMC_TIMINGS_TADL_SHIFT,
1367 ncycles);
1368 if (ret)
1369 return ret;
1370
1371 ncycles = DIV_ROUND_UP(conf->timings.sdr.tAR_min, mckperiodps);
1372 ret = atmel_smc_cs_conf_set_timing(smcconf,
1373 ATMEL_HSMC_TIMINGS_TAR_SHIFT,
1374 ncycles);
1375 if (ret)
1376 return ret;
1377
1378 ncycles = DIV_ROUND_UP(conf->timings.sdr.tRR_min, mckperiodps);
1379 ret = atmel_smc_cs_conf_set_timing(smcconf,
1380 ATMEL_HSMC_TIMINGS_TRR_SHIFT,
1381 ncycles);
1382 if (ret)
1383 return ret;
1384
1385 ncycles = DIV_ROUND_UP(conf->timings.sdr.tWB_max, mckperiodps);
1386 ret = atmel_smc_cs_conf_set_timing(smcconf,
1387 ATMEL_HSMC_TIMINGS_TWB_SHIFT,
1388 ncycles);
1389 if (ret)
1390 return ret;
1391
1392 /* Attach the CS line to the NFC logic. */
1393 smcconf->timings |= ATMEL_HSMC_TIMINGS_NFSEL;
1394
1395 /* Set the appropriate data bus width. */
1396 if (nand->base.options & NAND_BUSWIDTH_16)
1397 smcconf->mode |= ATMEL_SMC_MODE_DBW_16;
1398
1399 /* Operate in NRD/NWE READ/WRITEMODE. */
1400 smcconf->mode |= ATMEL_SMC_MODE_READMODE_NRD |
1401 ATMEL_SMC_MODE_WRITEMODE_NWE;
1402
1403 return 0;
1404}
1405
1406static int atmel_smc_nand_setup_data_interface(struct atmel_nand *nand,
1407 int csline,
1408 const struct nand_data_interface *conf)
1409{
1410 struct atmel_nand_controller *nc;
1411 struct atmel_smc_cs_conf smcconf;
1412 struct atmel_nand_cs *cs;
1413 int ret;
1414
1415 nc = to_nand_controller(nand->base.controller);
1416
1417 ret = atmel_smc_nand_prepare_smcconf(nand, conf, &smcconf);
1418 if (ret)
1419 return ret;
1420
1421 if (csline == NAND_DATA_IFACE_CHECK_ONLY)
1422 return 0;
1423
1424 cs = &nand->cs[csline];
1425 cs->smcconf = smcconf;
1426 atmel_smc_cs_conf_apply(nc->smc, cs->id, &cs->smcconf);
1427
1428 return 0;
1429}
1430
1431static int atmel_hsmc_nand_setup_data_interface(struct atmel_nand *nand,
1432 int csline,
1433 const struct nand_data_interface *conf)
1434{
1435 struct atmel_nand_controller *nc;
1436 struct atmel_smc_cs_conf smcconf;
1437 struct atmel_nand_cs *cs;
1438 int ret;
1439
1440 nc = to_nand_controller(nand->base.controller);
1441
1442 ret = atmel_smc_nand_prepare_smcconf(nand, conf, &smcconf);
1443 if (ret)
1444 return ret;
1445
1446 if (csline == NAND_DATA_IFACE_CHECK_ONLY)
1447 return 0;
1448
1449 cs = &nand->cs[csline];
1450 cs->smcconf = smcconf;
1451
1452 if (cs->rb.type == ATMEL_NAND_NATIVE_RB)
1453 cs->smcconf.timings |= ATMEL_HSMC_TIMINGS_RBNSEL(cs->rb.id);
1454
1455 atmel_hsmc_cs_conf_apply(nc->smc, cs->id, &cs->smcconf);
1456
1457 return 0;
1458}
1459
1460static int atmel_nand_setup_data_interface(struct mtd_info *mtd, int csline,
1461 const struct nand_data_interface *conf)
1462{
1463 struct nand_chip *chip = mtd_to_nand(mtd);
1464 struct atmel_nand *nand = to_atmel_nand(chip);
1465 struct atmel_nand_controller *nc;
1466
1467 nc = to_nand_controller(nand->base.controller);
1468
1469 if (csline >= nand->numcs ||
1470 (csline < 0 && csline != NAND_DATA_IFACE_CHECK_ONLY))
1471 return -EINVAL;
1472
1473 return nc->caps->ops->setup_data_interface(nand, csline, conf);
1474}
1475
Boris Brezillonf88fc122017-03-16 09:02:40 +01001476static void atmel_nand_init(struct atmel_nand_controller *nc,
1477 struct atmel_nand *nand)
1478{
1479 struct nand_chip *chip = &nand->base;
1480 struct mtd_info *mtd = nand_to_mtd(chip);
1481
1482 mtd->dev.parent = nc->dev;
1483 nand->base.controller = &nc->base;
1484
1485 chip->cmd_ctrl = atmel_nand_cmd_ctrl;
1486 chip->read_byte = atmel_nand_read_byte;
1487 chip->read_word = atmel_nand_read_word;
1488 chip->write_byte = atmel_nand_write_byte;
1489 chip->read_buf = atmel_nand_read_buf;
1490 chip->write_buf = atmel_nand_write_buf;
1491 chip->select_chip = atmel_nand_select_chip;
1492
Boris Brezillonf9ce2ed2017-03-16 09:35:59 +01001493 if (nc->mck && nc->caps->ops->setup_data_interface)
1494 chip->setup_data_interface = atmel_nand_setup_data_interface;
1495
Boris Brezillonf88fc122017-03-16 09:02:40 +01001496 /* Some NANDs require a longer delay than the default one (20us). */
1497 chip->chip_delay = 40;
1498
1499 /*
1500 * Use a bounce buffer when the buffer passed by the MTD user is not
1501 * suitable for DMA.
1502 */
1503 if (nc->dmac)
1504 chip->options |= NAND_USE_BOUNCE_BUFFER;
1505
1506 /* Default to HW ECC if pmecc is available. */
1507 if (nc->pmecc)
1508 chip->ecc.mode = NAND_ECC_HW;
1509}
1510
1511static void atmel_smc_nand_init(struct atmel_nand_controller *nc,
1512 struct atmel_nand *nand)
1513{
1514 struct nand_chip *chip = &nand->base;
1515 struct atmel_smc_nand_controller *smc_nc;
1516 int i;
1517
1518 atmel_nand_init(nc, nand);
1519
1520 smc_nc = to_smc_nand_controller(chip->controller);
1521 if (!smc_nc->matrix)
1522 return;
1523
1524 /* Attach the CS to the NAND Flash logic. */
1525 for (i = 0; i < nand->numcs; i++)
1526 regmap_update_bits(smc_nc->matrix, smc_nc->ebi_csa_offs,
1527 BIT(nand->cs[i].id), BIT(nand->cs[i].id));
1528}
1529
1530static void atmel_hsmc_nand_init(struct atmel_nand_controller *nc,
1531 struct atmel_nand *nand)
1532{
1533 struct nand_chip *chip = &nand->base;
1534
1535 atmel_nand_init(nc, nand);
1536
1537 /* Overload some methods for the HSMC controller. */
1538 chip->cmd_ctrl = atmel_hsmc_nand_cmd_ctrl;
1539 chip->select_chip = atmel_hsmc_nand_select_chip;
1540}
1541
1542static int atmel_nand_detect(struct atmel_nand *nand)
1543{
1544 struct nand_chip *chip = &nand->base;
1545 struct mtd_info *mtd = nand_to_mtd(chip);
1546 struct atmel_nand_controller *nc;
1547 int ret;
1548
1549 nc = to_nand_controller(chip->controller);
1550
1551 ret = nand_scan_ident(mtd, nand->numcs, NULL);
1552 if (ret)
1553 dev_err(nc->dev, "nand_scan_ident() failed: %d\n", ret);
1554
1555 return ret;
1556}
1557
1558static int atmel_nand_unregister(struct atmel_nand *nand)
1559{
1560 struct nand_chip *chip = &nand->base;
1561 struct mtd_info *mtd = nand_to_mtd(chip);
1562 int ret;
1563
1564 ret = mtd_device_unregister(mtd);
1565 if (ret)
1566 return ret;
1567
1568 nand_cleanup(chip);
1569 list_del(&nand->node);
1570
1571 return 0;
1572}
1573
1574static int atmel_nand_register(struct atmel_nand *nand)
1575{
1576 struct nand_chip *chip = &nand->base;
1577 struct mtd_info *mtd = nand_to_mtd(chip);
1578 struct atmel_nand_controller *nc;
1579 int ret;
1580
1581 nc = to_nand_controller(chip->controller);
1582
1583 if (nc->caps->legacy_of_bindings || !nc->dev->of_node) {
1584 /*
1585 * We keep the MTD name unchanged to avoid breaking platforms
1586 * where the MTD cmdline parser is used and the bootloader
1587 * has not been updated to use the new naming scheme.
1588 */
1589 mtd->name = "atmel_nand";
1590 } else if (!mtd->name) {
1591 /*
1592 * If the new bindings are used and the bootloader has not been
1593 * updated to pass a new mtdparts parameter on the cmdline, you
1594 * should define the following property in your nand node:
1595 *
1596 * label = "atmel_nand";
1597 *
1598 * This way, mtd->name will be set by the core when
1599 * nand_set_flash_node() is called.
1600 */
1601 mtd->name = devm_kasprintf(nc->dev, GFP_KERNEL,
1602 "%s:nand.%d", dev_name(nc->dev),
1603 nand->cs[0].id);
1604 if (!mtd->name) {
1605 dev_err(nc->dev, "Failed to allocate mtd->name\n");
1606 return -ENOMEM;
1607 }
1608 }
1609
1610 ret = nand_scan_tail(mtd);
1611 if (ret) {
1612 dev_err(nc->dev, "nand_scan_tail() failed: %d\n", ret);
1613 return ret;
1614 }
1615
1616 ret = mtd_device_register(mtd, NULL, 0);
1617 if (ret) {
1618 dev_err(nc->dev, "Failed to register mtd device: %d\n", ret);
1619 nand_cleanup(chip);
1620 return ret;
1621 }
1622
1623 list_add_tail(&nand->node, &nc->chips);
1624
1625 return 0;
1626}
1627
1628static struct atmel_nand *atmel_nand_create(struct atmel_nand_controller *nc,
1629 struct device_node *np,
1630 int reg_cells)
1631{
1632 struct atmel_nand *nand;
1633 struct gpio_desc *gpio;
1634 int numcs, ret, i;
1635
1636 numcs = of_property_count_elems_of_size(np, "reg",
1637 reg_cells * sizeof(u32));
1638 if (numcs < 1) {
1639 dev_err(nc->dev, "Missing or invalid reg property\n");
1640 return ERR_PTR(-EINVAL);
1641 }
1642
1643 nand = devm_kzalloc(nc->dev,
1644 sizeof(*nand) + (numcs * sizeof(*nand->cs)),
1645 GFP_KERNEL);
1646 if (!nand) {
1647 dev_err(nc->dev, "Failed to allocate NAND object\n");
1648 return ERR_PTR(-ENOMEM);
1649 }
1650
1651 nand->numcs = numcs;
1652
1653 gpio = devm_fwnode_get_index_gpiod_from_child(nc->dev, "det", 0,
1654 &np->fwnode, GPIOD_IN,
1655 "nand-det");
1656 if (IS_ERR(gpio) && PTR_ERR(gpio) != -ENOENT) {
1657 dev_err(nc->dev,
1658 "Failed to get detect gpio (err = %ld)\n",
1659 PTR_ERR(gpio));
1660 return ERR_CAST(gpio);
1661 }
1662
1663 if (!IS_ERR(gpio))
1664 nand->cdgpio = gpio;
1665
1666 for (i = 0; i < numcs; i++) {
1667 struct resource res;
1668 u32 val;
1669
1670 ret = of_address_to_resource(np, 0, &res);
1671 if (ret) {
1672 dev_err(nc->dev, "Invalid reg property (err = %d)\n",
1673 ret);
1674 return ERR_PTR(ret);
1675 }
1676
1677 ret = of_property_read_u32_index(np, "reg", i * reg_cells,
1678 &val);
1679 if (ret) {
1680 dev_err(nc->dev, "Invalid reg property (err = %d)\n",
1681 ret);
1682 return ERR_PTR(ret);
1683 }
1684
1685 nand->cs[i].id = val;
1686
1687 nand->cs[i].io.dma = res.start;
1688 nand->cs[i].io.virt = devm_ioremap_resource(nc->dev, &res);
1689 if (IS_ERR(nand->cs[i].io.virt))
1690 return ERR_CAST(nand->cs[i].io.virt);
1691
1692 if (!of_property_read_u32(np, "atmel,rb", &val)) {
1693 if (val > ATMEL_NFC_MAX_RB_ID)
1694 return ERR_PTR(-EINVAL);
1695
1696 nand->cs[i].rb.type = ATMEL_NAND_NATIVE_RB;
1697 nand->cs[i].rb.id = val;
1698 } else {
1699 gpio = devm_fwnode_get_index_gpiod_from_child(nc->dev,
1700 "rb", i, &np->fwnode,
1701 GPIOD_IN, "nand-rb");
1702 if (IS_ERR(gpio) && PTR_ERR(gpio) != -ENOENT) {
1703 dev_err(nc->dev,
1704 "Failed to get R/B gpio (err = %ld)\n",
1705 PTR_ERR(gpio));
1706 return ERR_CAST(gpio);
1707 }
1708
1709 if (!IS_ERR(gpio)) {
1710 nand->cs[i].rb.type = ATMEL_NAND_GPIO_RB;
1711 nand->cs[i].rb.gpio = gpio;
1712 }
1713 }
1714
1715 gpio = devm_fwnode_get_index_gpiod_from_child(nc->dev, "cs",
1716 i, &np->fwnode,
1717 GPIOD_OUT_HIGH,
1718 "nand-cs");
1719 if (IS_ERR(gpio) && PTR_ERR(gpio) != -ENOENT) {
1720 dev_err(nc->dev,
1721 "Failed to get CS gpio (err = %ld)\n",
1722 PTR_ERR(gpio));
1723 return ERR_CAST(gpio);
1724 }
1725
1726 if (!IS_ERR(gpio))
1727 nand->cs[i].csgpio = gpio;
1728 }
1729
1730 nand_set_flash_node(&nand->base, np);
1731
1732 return nand;
1733}
1734
1735static int
1736atmel_nand_controller_add_nand(struct atmel_nand_controller *nc,
1737 struct atmel_nand *nand)
1738{
1739 int ret;
1740
1741 /* No card inserted, skip this NAND. */
1742 if (nand->cdgpio && gpiod_get_value(nand->cdgpio)) {
1743 dev_info(nc->dev, "No SmartMedia card inserted.\n");
1744 return 0;
1745 }
1746
1747 nc->caps->ops->nand_init(nc, nand);
1748
1749 ret = atmel_nand_detect(nand);
1750 if (ret)
1751 return ret;
1752
1753 ret = nc->caps->ops->ecc_init(nand);
1754 if (ret)
1755 return ret;
1756
1757 return atmel_nand_register(nand);
1758}
1759
1760static int
1761atmel_nand_controller_remove_nands(struct atmel_nand_controller *nc)
1762{
1763 struct atmel_nand *nand, *tmp;
1764 int ret;
1765
1766 list_for_each_entry_safe(nand, tmp, &nc->chips, node) {
1767 ret = atmel_nand_unregister(nand);
1768 if (ret)
1769 return ret;
1770 }
1771
1772 return 0;
1773}
1774
1775static int
1776atmel_nand_controller_legacy_add_nands(struct atmel_nand_controller *nc)
1777{
1778 struct device *dev = nc->dev;
1779 struct platform_device *pdev = to_platform_device(dev);
1780 struct atmel_nand *nand;
1781 struct gpio_desc *gpio;
1782 struct resource *res;
1783
1784 /*
1785 * Legacy bindings only allow connecting a single NAND with a unique CS
1786 * line to the controller.
1787 */
1788 nand = devm_kzalloc(nc->dev, sizeof(*nand) + sizeof(*nand->cs),
1789 GFP_KERNEL);
1790 if (!nand)
1791 return -ENOMEM;
1792
1793 nand->numcs = 1;
1794
1795 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1796 nand->cs[0].io.virt = devm_ioremap_resource(dev, res);
1797 if (IS_ERR(nand->cs[0].io.virt))
1798 return PTR_ERR(nand->cs[0].io.virt);
1799
1800 nand->cs[0].io.dma = res->start;
1801
1802 /*
1803 * The old driver was hardcoding the CS id to 3 for all sama5
1804 * controllers. Since this id is only meaningful for the sama5
1805 * controller we can safely assign this id to 3 no matter the
1806 * controller.
1807 * If one wants to connect a NAND to a different CS line, he will
1808 * have to use the new bindings.
1809 */
1810 nand->cs[0].id = 3;
1811
1812 /* R/B GPIO. */
1813 gpio = devm_gpiod_get_index_optional(dev, NULL, 0, GPIOD_IN);
1814 if (IS_ERR(gpio)) {
1815 dev_err(dev, "Failed to get R/B gpio (err = %ld)\n",
1816 PTR_ERR(gpio));
1817 return PTR_ERR(gpio);
1818 }
1819
1820 if (gpio) {
1821 nand->cs[0].rb.type = ATMEL_NAND_GPIO_RB;
1822 nand->cs[0].rb.gpio = gpio;
1823 }
1824
1825 /* CS GPIO. */
1826 gpio = devm_gpiod_get_index_optional(dev, NULL, 1, GPIOD_OUT_HIGH);
1827 if (IS_ERR(gpio)) {
1828 dev_err(dev, "Failed to get CS gpio (err = %ld)\n",
1829 PTR_ERR(gpio));
1830 return PTR_ERR(gpio);
1831 }
1832
1833 nand->cs[0].csgpio = gpio;
1834
1835 /* Card detect GPIO. */
1836 gpio = devm_gpiod_get_index_optional(nc->dev, NULL, 2, GPIOD_IN);
1837 if (IS_ERR(gpio)) {
1838 dev_err(dev,
1839 "Failed to get detect gpio (err = %ld)\n",
1840 PTR_ERR(gpio));
1841 return PTR_ERR(gpio);
1842 }
1843
1844 nand->cdgpio = gpio;
1845
1846 nand_set_flash_node(&nand->base, nc->dev->of_node);
1847
1848 return atmel_nand_controller_add_nand(nc, nand);
1849}
1850
1851static int atmel_nand_controller_add_nands(struct atmel_nand_controller *nc)
1852{
1853 struct device_node *np, *nand_np;
1854 struct device *dev = nc->dev;
1855 int ret, reg_cells;
1856 u32 val;
1857
1858 /* We do not retrieve the SMC syscon when parsing old DTs. */
1859 if (nc->caps->legacy_of_bindings)
1860 return atmel_nand_controller_legacy_add_nands(nc);
1861
1862 np = dev->of_node;
1863
1864 ret = of_property_read_u32(np, "#address-cells", &val);
1865 if (ret) {
1866 dev_err(dev, "missing #address-cells property\n");
1867 return ret;
1868 }
1869
1870 reg_cells = val;
1871
1872 ret = of_property_read_u32(np, "#size-cells", &val);
1873 if (ret) {
1874 dev_err(dev, "missing #address-cells property\n");
1875 return ret;
1876 }
1877
1878 reg_cells += val;
1879
1880 for_each_child_of_node(np, nand_np) {
1881 struct atmel_nand *nand;
1882
1883 nand = atmel_nand_create(nc, nand_np, reg_cells);
1884 if (IS_ERR(nand)) {
1885 ret = PTR_ERR(nand);
1886 goto err;
1887 }
1888
1889 ret = atmel_nand_controller_add_nand(nc, nand);
1890 if (ret)
1891 goto err;
1892 }
1893
1894 return 0;
1895
1896err:
1897 atmel_nand_controller_remove_nands(nc);
1898
1899 return ret;
1900}
1901
1902static void atmel_nand_controller_cleanup(struct atmel_nand_controller *nc)
1903{
1904 if (nc->dmac)
1905 dma_release_channel(nc->dmac);
1906
1907 clk_put(nc->mck);
1908}
1909
1910static const struct of_device_id atmel_matrix_of_ids[] = {
1911 {
1912 .compatible = "atmel,at91sam9260-matrix",
1913 .data = (void *)AT91SAM9260_MATRIX_EBICSA,
1914 },
1915 {
1916 .compatible = "atmel,at91sam9261-matrix",
1917 .data = (void *)AT91SAM9261_MATRIX_EBICSA,
1918 },
1919 {
1920 .compatible = "atmel,at91sam9263-matrix",
1921 .data = (void *)AT91SAM9263_MATRIX_EBI0CSA,
1922 },
1923 {
1924 .compatible = "atmel,at91sam9rl-matrix",
1925 .data = (void *)AT91SAM9RL_MATRIX_EBICSA,
1926 },
1927 {
1928 .compatible = "atmel,at91sam9g45-matrix",
1929 .data = (void *)AT91SAM9G45_MATRIX_EBICSA,
1930 },
1931 {
1932 .compatible = "atmel,at91sam9n12-matrix",
1933 .data = (void *)AT91SAM9N12_MATRIX_EBICSA,
1934 },
1935 {
1936 .compatible = "atmel,at91sam9x5-matrix",
1937 .data = (void *)AT91SAM9X5_MATRIX_EBICSA,
1938 },
Christophe Jaillet038e8ad6e2017-04-11 07:22:52 +02001939 { /* sentinel */ },
Boris Brezillonf88fc122017-03-16 09:02:40 +01001940};
1941
1942static int atmel_nand_controller_init(struct atmel_nand_controller *nc,
1943 struct platform_device *pdev,
1944 const struct atmel_nand_controller_caps *caps)
1945{
1946 struct device *dev = &pdev->dev;
1947 struct device_node *np = dev->of_node;
1948 int ret;
1949
1950 nand_hw_control_init(&nc->base);
1951 INIT_LIST_HEAD(&nc->chips);
1952 nc->dev = dev;
1953 nc->caps = caps;
1954
1955 platform_set_drvdata(pdev, nc);
1956
1957 nc->pmecc = devm_atmel_pmecc_get(dev);
1958 if (IS_ERR(nc->pmecc)) {
1959 ret = PTR_ERR(nc->pmecc);
1960 if (ret != -EPROBE_DEFER)
1961 dev_err(dev, "Could not get PMECC object (err = %d)\n",
1962 ret);
1963 return ret;
1964 }
1965
1966 if (nc->caps->has_dma) {
1967 dma_cap_mask_t mask;
1968
1969 dma_cap_zero(mask);
1970 dma_cap_set(DMA_MEMCPY, mask);
1971
1972 nc->dmac = dma_request_channel(mask, NULL, NULL);
1973 if (!nc->dmac)
1974 dev_err(nc->dev, "Failed to request DMA channel\n");
1975 }
1976
1977 /* We do not retrieve the SMC syscon when parsing old DTs. */
1978 if (nc->caps->legacy_of_bindings)
1979 return 0;
1980
Boris Brezillonf9ce2ed2017-03-16 09:35:59 +01001981 nc->mck = of_clk_get(dev->parent->of_node, 0);
1982 if (IS_ERR(nc->mck)) {
1983 dev_err(dev, "Failed to retrieve MCK clk\n");
1984 return PTR_ERR(nc->mck);
1985 }
1986
Boris Brezillonf88fc122017-03-16 09:02:40 +01001987 np = of_parse_phandle(dev->parent->of_node, "atmel,smc", 0);
1988 if (!np) {
1989 dev_err(dev, "Missing or invalid atmel,smc property\n");
1990 return -EINVAL;
1991 }
1992
1993 nc->smc = syscon_node_to_regmap(np);
1994 of_node_put(np);
1995 if (IS_ERR(nc->smc)) {
Dan Carpenter70106dd2017-04-04 11:15:46 +03001996 ret = PTR_ERR(nc->smc);
Boris Brezillonf88fc122017-03-16 09:02:40 +01001997 dev_err(dev, "Could not get SMC regmap (err = %d)\n", ret);
1998 return ret;
1999 }
2000
2001 return 0;
2002}
2003
2004static int
2005atmel_smc_nand_controller_init(struct atmel_smc_nand_controller *nc)
2006{
2007 struct device *dev = nc->base.dev;
2008 const struct of_device_id *match;
2009 struct device_node *np;
2010 int ret;
2011
2012 /* We do not retrieve the matrix syscon when parsing old DTs. */
2013 if (nc->base.caps->legacy_of_bindings)
2014 return 0;
2015
2016 np = of_parse_phandle(dev->parent->of_node, "atmel,matrix", 0);
2017 if (!np)
2018 return 0;
2019
2020 match = of_match_node(atmel_matrix_of_ids, np);
2021 if (!match) {
2022 of_node_put(np);
2023 return 0;
2024 }
2025
2026 nc->matrix = syscon_node_to_regmap(np);
2027 of_node_put(np);
2028 if (IS_ERR(nc->matrix)) {
Dan Carpenter70106dd2017-04-04 11:15:46 +03002029 ret = PTR_ERR(nc->matrix);
Boris Brezillonf88fc122017-03-16 09:02:40 +01002030 dev_err(dev, "Could not get Matrix regmap (err = %d)\n", ret);
2031 return ret;
2032 }
2033
2034 nc->ebi_csa_offs = (unsigned int)match->data;
2035
2036 /*
2037 * The at91sam9263 has 2 EBIs, if the NAND controller is under EBI1
2038 * add 4 to ->ebi_csa_offs.
2039 */
2040 if (of_device_is_compatible(dev->parent->of_node,
2041 "atmel,at91sam9263-ebi1"))
2042 nc->ebi_csa_offs += 4;
2043
2044 return 0;
2045}
2046
2047static int
2048atmel_hsmc_nand_controller_legacy_init(struct atmel_hsmc_nand_controller *nc)
2049{
2050 struct regmap_config regmap_conf = {
2051 .reg_bits = 32,
2052 .val_bits = 32,
2053 .reg_stride = 4,
2054 };
2055
2056 struct device *dev = nc->base.dev;
2057 struct device_node *nand_np, *nfc_np;
2058 void __iomem *iomem;
2059 struct resource res;
2060 int ret;
2061
2062 nand_np = dev->of_node;
2063 nfc_np = of_find_compatible_node(dev->of_node, NULL,
2064 "atmel,sama5d3-nfc");
2065
2066 nc->clk = of_clk_get(nfc_np, 0);
2067 if (IS_ERR(nc->clk)) {
2068 ret = PTR_ERR(nc->clk);
2069 dev_err(dev, "Failed to retrieve HSMC clock (err = %d)\n",
2070 ret);
2071 goto out;
2072 }
2073
2074 ret = clk_prepare_enable(nc->clk);
2075 if (ret) {
2076 dev_err(dev, "Failed to enable the HSMC clock (err = %d)\n",
2077 ret);
2078 goto out;
2079 }
2080
2081 nc->irq = of_irq_get(nand_np, 0);
2082 if (nc->irq < 0) {
2083 ret = nc->irq;
2084 if (ret != -EPROBE_DEFER)
2085 dev_err(dev, "Failed to get IRQ number (err = %d)\n",
2086 ret);
2087 goto out;
2088 }
2089
2090 ret = of_address_to_resource(nfc_np, 0, &res);
2091 if (ret) {
2092 dev_err(dev, "Invalid or missing NFC IO resource (err = %d)\n",
2093 ret);
2094 goto out;
2095 }
2096
2097 iomem = devm_ioremap_resource(dev, &res);
2098 if (IS_ERR(iomem)) {
2099 ret = PTR_ERR(iomem);
2100 goto out;
2101 }
2102
2103 regmap_conf.name = "nfc-io";
2104 regmap_conf.max_register = resource_size(&res) - 4;
2105 nc->io = devm_regmap_init_mmio(dev, iomem, &regmap_conf);
2106 if (IS_ERR(nc->io)) {
2107 ret = PTR_ERR(nc->io);
2108 dev_err(dev, "Could not create NFC IO regmap (err = %d)\n",
2109 ret);
2110 goto out;
2111 }
2112
2113 ret = of_address_to_resource(nfc_np, 1, &res);
2114 if (ret) {
2115 dev_err(dev, "Invalid or missing HSMC resource (err = %d)\n",
2116 ret);
2117 goto out;
2118 }
2119
2120 iomem = devm_ioremap_resource(dev, &res);
2121 if (IS_ERR(iomem)) {
2122 ret = PTR_ERR(iomem);
2123 goto out;
2124 }
2125
2126 regmap_conf.name = "smc";
2127 regmap_conf.max_register = resource_size(&res) - 4;
2128 nc->base.smc = devm_regmap_init_mmio(dev, iomem, &regmap_conf);
2129 if (IS_ERR(nc->base.smc)) {
2130 ret = PTR_ERR(nc->base.smc);
2131 dev_err(dev, "Could not create NFC IO regmap (err = %d)\n",
2132 ret);
2133 goto out;
2134 }
2135
2136 ret = of_address_to_resource(nfc_np, 2, &res);
2137 if (ret) {
2138 dev_err(dev, "Invalid or missing SRAM resource (err = %d)\n",
2139 ret);
2140 goto out;
2141 }
2142
2143 nc->sram.virt = devm_ioremap_resource(dev, &res);
2144 if (IS_ERR(nc->sram.virt)) {
2145 ret = PTR_ERR(nc->sram.virt);
2146 goto out;
2147 }
2148
2149 nc->sram.dma = res.start;
2150
2151out:
2152 of_node_put(nfc_np);
2153
2154 return ret;
2155}
2156
2157static int
2158atmel_hsmc_nand_controller_init(struct atmel_hsmc_nand_controller *nc)
2159{
2160 struct device *dev = nc->base.dev;
2161 struct device_node *np;
2162 int ret;
2163
2164 np = of_parse_phandle(dev->parent->of_node, "atmel,smc", 0);
2165 if (!np) {
2166 dev_err(dev, "Missing or invalid atmel,smc property\n");
2167 return -EINVAL;
2168 }
2169
2170 nc->irq = of_irq_get(np, 0);
2171 of_node_put(np);
2172 if (nc->irq < 0) {
2173 if (nc->irq != -EPROBE_DEFER)
2174 dev_err(dev, "Failed to get IRQ number (err = %d)\n",
2175 nc->irq);
2176 return nc->irq;
2177 }
2178
2179 np = of_parse_phandle(dev->of_node, "atmel,nfc-io", 0);
2180 if (!np) {
2181 dev_err(dev, "Missing or invalid atmel,nfc-io property\n");
2182 return -EINVAL;
2183 }
2184
2185 nc->io = syscon_node_to_regmap(np);
2186 of_node_put(np);
2187 if (IS_ERR(nc->io)) {
2188 ret = PTR_ERR(nc->io);
2189 dev_err(dev, "Could not get NFC IO regmap (err = %d)\n", ret);
2190 return ret;
2191 }
2192
2193 nc->sram.pool = of_gen_pool_get(nc->base.dev->of_node,
2194 "atmel,nfc-sram", 0);
2195 if (!nc->sram.pool) {
2196 dev_err(nc->base.dev, "Missing SRAM\n");
2197 return -ENOMEM;
2198 }
2199
2200 nc->sram.virt = gen_pool_dma_alloc(nc->sram.pool,
2201 ATMEL_NFC_SRAM_SIZE,
2202 &nc->sram.dma);
2203 if (!nc->sram.virt) {
2204 dev_err(nc->base.dev,
2205 "Could not allocate memory from the NFC SRAM pool\n");
2206 return -ENOMEM;
2207 }
2208
2209 return 0;
2210}
2211
2212static int
2213atmel_hsmc_nand_controller_remove(struct atmel_nand_controller *nc)
2214{
2215 struct atmel_hsmc_nand_controller *hsmc_nc;
2216 int ret;
2217
2218 ret = atmel_nand_controller_remove_nands(nc);
2219 if (ret)
2220 return ret;
2221
2222 hsmc_nc = container_of(nc, struct atmel_hsmc_nand_controller, base);
2223 if (hsmc_nc->sram.pool)
2224 gen_pool_free(hsmc_nc->sram.pool,
2225 (unsigned long)hsmc_nc->sram.virt,
2226 ATMEL_NFC_SRAM_SIZE);
2227
2228 if (hsmc_nc->clk) {
2229 clk_disable_unprepare(hsmc_nc->clk);
2230 clk_put(hsmc_nc->clk);
2231 }
2232
2233 atmel_nand_controller_cleanup(nc);
2234
2235 return 0;
2236}
2237
2238static int atmel_hsmc_nand_controller_probe(struct platform_device *pdev,
2239 const struct atmel_nand_controller_caps *caps)
2240{
2241 struct device *dev = &pdev->dev;
2242 struct atmel_hsmc_nand_controller *nc;
2243 int ret;
2244
2245 nc = devm_kzalloc(dev, sizeof(*nc), GFP_KERNEL);
2246 if (!nc)
2247 return -ENOMEM;
2248
2249 ret = atmel_nand_controller_init(&nc->base, pdev, caps);
2250 if (ret)
2251 return ret;
2252
2253 if (caps->legacy_of_bindings)
2254 ret = atmel_hsmc_nand_controller_legacy_init(nc);
2255 else
2256 ret = atmel_hsmc_nand_controller_init(nc);
2257
2258 if (ret)
2259 return ret;
2260
2261 /* Make sure all irqs are masked before registering our IRQ handler. */
2262 regmap_write(nc->base.smc, ATMEL_HSMC_NFC_IDR, 0xffffffff);
2263 ret = devm_request_irq(dev, nc->irq, atmel_nfc_interrupt,
2264 IRQF_SHARED, "nfc", nc);
2265 if (ret) {
2266 dev_err(dev,
2267 "Could not get register NFC interrupt handler (err = %d)\n",
2268 ret);
2269 goto err;
2270 }
2271
2272 /* Initial NFC configuration. */
2273 regmap_write(nc->base.smc, ATMEL_HSMC_NFC_CFG,
2274 ATMEL_HSMC_NFC_CFG_DTO_MAX);
2275
2276 ret = atmel_nand_controller_add_nands(&nc->base);
2277 if (ret)
2278 goto err;
2279
2280 return 0;
2281
2282err:
2283 atmel_hsmc_nand_controller_remove(&nc->base);
2284
2285 return ret;
2286}
2287
2288static const struct atmel_nand_controller_ops atmel_hsmc_nc_ops = {
2289 .probe = atmel_hsmc_nand_controller_probe,
2290 .remove = atmel_hsmc_nand_controller_remove,
2291 .ecc_init = atmel_hsmc_nand_ecc_init,
2292 .nand_init = atmel_hsmc_nand_init,
Boris Brezillonf9ce2ed2017-03-16 09:35:59 +01002293 .setup_data_interface = atmel_hsmc_nand_setup_data_interface,
Boris Brezillonf88fc122017-03-16 09:02:40 +01002294};
2295
2296static const struct atmel_nand_controller_caps atmel_sama5_nc_caps = {
2297 .has_dma = true,
2298 .ale_offs = BIT(21),
2299 .cle_offs = BIT(22),
2300 .ops = &atmel_hsmc_nc_ops,
2301};
2302
2303/* Only used to parse old bindings. */
2304static const struct atmel_nand_controller_caps atmel_sama5_nand_caps = {
2305 .has_dma = true,
2306 .ale_offs = BIT(21),
2307 .cle_offs = BIT(22),
2308 .ops = &atmel_hsmc_nc_ops,
2309 .legacy_of_bindings = true,
2310};
2311
2312static int atmel_smc_nand_controller_probe(struct platform_device *pdev,
2313 const struct atmel_nand_controller_caps *caps)
2314{
2315 struct device *dev = &pdev->dev;
2316 struct atmel_smc_nand_controller *nc;
2317 int ret;
2318
2319 nc = devm_kzalloc(dev, sizeof(*nc), GFP_KERNEL);
2320 if (!nc)
2321 return -ENOMEM;
2322
2323 ret = atmel_nand_controller_init(&nc->base, pdev, caps);
2324 if (ret)
2325 return ret;
2326
2327 ret = atmel_smc_nand_controller_init(nc);
2328 if (ret)
2329 return ret;
2330
2331 return atmel_nand_controller_add_nands(&nc->base);
2332}
2333
2334static int
2335atmel_smc_nand_controller_remove(struct atmel_nand_controller *nc)
2336{
2337 int ret;
2338
2339 ret = atmel_nand_controller_remove_nands(nc);
2340 if (ret)
2341 return ret;
2342
2343 atmel_nand_controller_cleanup(nc);
2344
2345 return 0;
2346}
2347
Boris Brezillonf9ce2ed2017-03-16 09:35:59 +01002348/*
2349 * The SMC reg layout of at91rm9200 is completely different which prevents us
2350 * from re-using atmel_smc_nand_setup_data_interface() for the
2351 * ->setup_data_interface() hook.
2352 * At this point, there's no support for the at91rm9200 SMC IP, so we leave
2353 * ->setup_data_interface() unassigned.
2354 */
2355static const struct atmel_nand_controller_ops at91rm9200_nc_ops = {
Boris Brezillonf88fc122017-03-16 09:02:40 +01002356 .probe = atmel_smc_nand_controller_probe,
2357 .remove = atmel_smc_nand_controller_remove,
2358 .ecc_init = atmel_nand_ecc_init,
2359 .nand_init = atmel_smc_nand_init,
2360};
2361
2362static const struct atmel_nand_controller_caps atmel_rm9200_nc_caps = {
2363 .ale_offs = BIT(21),
2364 .cle_offs = BIT(22),
Boris Brezillonf9ce2ed2017-03-16 09:35:59 +01002365 .ops = &at91rm9200_nc_ops,
2366};
2367
2368static const struct atmel_nand_controller_ops atmel_smc_nc_ops = {
2369 .probe = atmel_smc_nand_controller_probe,
2370 .remove = atmel_smc_nand_controller_remove,
2371 .ecc_init = atmel_nand_ecc_init,
2372 .nand_init = atmel_smc_nand_init,
2373 .setup_data_interface = atmel_smc_nand_setup_data_interface,
2374};
2375
2376static const struct atmel_nand_controller_caps atmel_sam9260_nc_caps = {
2377 .ale_offs = BIT(21),
2378 .cle_offs = BIT(22),
Boris Brezillonf88fc122017-03-16 09:02:40 +01002379 .ops = &atmel_smc_nc_ops,
2380};
2381
2382static const struct atmel_nand_controller_caps atmel_sam9261_nc_caps = {
2383 .ale_offs = BIT(22),
2384 .cle_offs = BIT(21),
2385 .ops = &atmel_smc_nc_ops,
2386};
2387
2388static const struct atmel_nand_controller_caps atmel_sam9g45_nc_caps = {
2389 .has_dma = true,
2390 .ale_offs = BIT(21),
2391 .cle_offs = BIT(22),
2392 .ops = &atmel_smc_nc_ops,
2393};
2394
2395/* Only used to parse old bindings. */
2396static const struct atmel_nand_controller_caps atmel_rm9200_nand_caps = {
2397 .ale_offs = BIT(21),
2398 .cle_offs = BIT(22),
2399 .ops = &atmel_smc_nc_ops,
2400 .legacy_of_bindings = true,
2401};
2402
2403static const struct atmel_nand_controller_caps atmel_sam9261_nand_caps = {
2404 .ale_offs = BIT(22),
2405 .cle_offs = BIT(21),
2406 .ops = &atmel_smc_nc_ops,
2407 .legacy_of_bindings = true,
2408};
2409
2410static const struct atmel_nand_controller_caps atmel_sam9g45_nand_caps = {
2411 .has_dma = true,
2412 .ale_offs = BIT(21),
2413 .cle_offs = BIT(22),
2414 .ops = &atmel_smc_nc_ops,
2415 .legacy_of_bindings = true,
2416};
2417
2418static const struct of_device_id atmel_nand_controller_of_ids[] = {
2419 {
2420 .compatible = "atmel,at91rm9200-nand-controller",
2421 .data = &atmel_rm9200_nc_caps,
2422 },
2423 {
2424 .compatible = "atmel,at91sam9260-nand-controller",
Boris Brezillonf9ce2ed2017-03-16 09:35:59 +01002425 .data = &atmel_sam9260_nc_caps,
Boris Brezillonf88fc122017-03-16 09:02:40 +01002426 },
2427 {
2428 .compatible = "atmel,at91sam9261-nand-controller",
2429 .data = &atmel_sam9261_nc_caps,
2430 },
2431 {
2432 .compatible = "atmel,at91sam9g45-nand-controller",
2433 .data = &atmel_sam9g45_nc_caps,
2434 },
2435 {
2436 .compatible = "atmel,sama5d3-nand-controller",
2437 .data = &atmel_sama5_nc_caps,
2438 },
2439 /* Support for old/deprecated bindings: */
2440 {
2441 .compatible = "atmel,at91rm9200-nand",
2442 .data = &atmel_rm9200_nand_caps,
2443 },
2444 {
2445 .compatible = "atmel,sama5d4-nand",
2446 .data = &atmel_rm9200_nand_caps,
2447 },
2448 {
2449 .compatible = "atmel,sama5d2-nand",
2450 .data = &atmel_rm9200_nand_caps,
2451 },
2452 { /* sentinel */ },
2453};
2454MODULE_DEVICE_TABLE(of, atmel_nand_controller_of_ids);
2455
2456static int atmel_nand_controller_probe(struct platform_device *pdev)
2457{
2458 const struct atmel_nand_controller_caps *caps;
2459
2460 if (pdev->id_entry)
2461 caps = (void *)pdev->id_entry->driver_data;
2462 else
2463 caps = of_device_get_match_data(&pdev->dev);
2464
2465 if (!caps) {
2466 dev_err(&pdev->dev, "Could not retrieve NFC caps\n");
2467 return -EINVAL;
2468 }
2469
2470 if (caps->legacy_of_bindings) {
2471 u32 ale_offs = 21;
2472
2473 /*
2474 * If we are parsing legacy DT props and the DT contains a
2475 * valid NFC node, forward the request to the sama5 logic.
2476 */
2477 if (of_find_compatible_node(pdev->dev.of_node, NULL,
2478 "atmel,sama5d3-nfc"))
2479 caps = &atmel_sama5_nand_caps;
2480
2481 /*
2482 * Even if the compatible says we are dealing with an
2483 * at91rm9200 controller, the atmel,nand-has-dma specify that
2484 * this controller supports DMA, which means we are in fact
2485 * dealing with an at91sam9g45+ controller.
2486 */
2487 if (!caps->has_dma &&
2488 of_property_read_bool(pdev->dev.of_node,
2489 "atmel,nand-has-dma"))
2490 caps = &atmel_sam9g45_nand_caps;
2491
2492 /*
2493 * All SoCs except the at91sam9261 are assigning ALE to A21 and
2494 * CLE to A22. If atmel,nand-addr-offset != 21 this means we're
2495 * actually dealing with an at91sam9261 controller.
2496 */
2497 of_property_read_u32(pdev->dev.of_node,
2498 "atmel,nand-addr-offset", &ale_offs);
2499 if (ale_offs != 21)
2500 caps = &atmel_sam9261_nand_caps;
2501 }
2502
2503 return caps->ops->probe(pdev, caps);
2504}
2505
2506static int atmel_nand_controller_remove(struct platform_device *pdev)
2507{
2508 struct atmel_nand_controller *nc = platform_get_drvdata(pdev);
2509
2510 return nc->caps->ops->remove(nc);
2511}
2512
Arnd Bergmann05b6c232017-05-31 10:19:26 +02002513static __maybe_unused int atmel_nand_controller_resume(struct device *dev)
Boris Brezillon6e532af2017-03-16 09:36:00 +01002514{
2515 struct atmel_nand_controller *nc = dev_get_drvdata(dev);
2516 struct atmel_nand *nand;
2517
2518 list_for_each_entry(nand, &nc->chips, node) {
2519 int i;
2520
2521 for (i = 0; i < nand->numcs; i++)
2522 nand_reset(&nand->base, i);
2523 }
2524
2525 return 0;
2526}
2527
2528static SIMPLE_DEV_PM_OPS(atmel_nand_controller_pm_ops, NULL,
2529 atmel_nand_controller_resume);
2530
Boris Brezillonf88fc122017-03-16 09:02:40 +01002531static struct platform_driver atmel_nand_controller_driver = {
2532 .driver = {
2533 .name = "atmel-nand-controller",
2534 .of_match_table = of_match_ptr(atmel_nand_controller_of_ids),
2535 },
2536 .probe = atmel_nand_controller_probe,
2537 .remove = atmel_nand_controller_remove,
2538};
2539module_platform_driver(atmel_nand_controller_driver);
2540
2541MODULE_LICENSE("GPL");
2542MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
2543MODULE_DESCRIPTION("NAND Flash Controller driver for Atmel SoCs");
2544MODULE_ALIAS("platform:atmel-nand-controller");