spi: davinci: add support for wait enable timeouts
[linux-2.6.git] / drivers / spi / davinci_spi.c
1 /*
2  * Copyright (C) 2009 Texas Instruments.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
17  */
18
19 #include <linux/interrupt.h>
20 #include <linux/io.h>
21 #include <linux/gpio.h>
22 #include <linux/module.h>
23 #include <linux/delay.h>
24 #include <linux/platform_device.h>
25 #include <linux/err.h>
26 #include <linux/clk.h>
27 #include <linux/dma-mapping.h>
28 #include <linux/spi/spi.h>
29 #include <linux/spi/spi_bitbang.h>
30 #include <linux/slab.h>
31
32 #include <mach/spi.h>
33 #include <mach/edma.h>
34
#define SPI_NO_RESOURCE		((resource_size_t)-1)

#define SPI_MAX_CHIPSELECT	2

/*
 * Default SPIDAT1 chip-select pattern: all CS bits deasserted;
 * davinci_spi_chipselect() clears the bit of the active chipselect.
 */
#define CS_DEFAULT	0xFF

#define SPI_BUFSIZ	(SMP_CACHE_BYTES + 1)
/* EDMA element widths in bytes (used as the EDMA a-count/data type) */
#define DAVINCI_DMA_DATA_TYPE_S8	0x01
#define DAVINCI_DMA_DATA_TYPE_S16	0x02
#define DAVINCI_DMA_DATA_TYPE_S32	0x04

/* SPIFMTn: per-chipselect data format register fields */
#define SPIFMT_PHASE_MASK	BIT(16)
#define SPIFMT_POLARITY_MASK	BIT(17)
#define SPIFMT_DISTIMER_MASK	BIT(18)
#define SPIFMT_SHIFTDIR_MASK	BIT(20)
#define SPIFMT_WAITENA_MASK	BIT(21)
#define SPIFMT_PARITYENA_MASK	BIT(22)
#define SPIFMT_ODD_PARITY_MASK	BIT(23)
#define SPIFMT_WDELAY_MASK	0x3f000000u
#define SPIFMT_WDELAY_SHIFT	24
#define SPIFMT_PRESCALE_SHIFT	8


/* SPIPC0: pin function control */
#define SPIPC0_DIFUN_MASK	BIT(11)		/* MISO */
#define SPIPC0_DOFUN_MASK	BIT(10)		/* MOSI */
#define SPIPC0_CLKFUN_MASK	BIT(9)		/* CLK */
#define SPIPC0_SPIENA_MASK	BIT(8)		/* nREADY */

#define SPIINT_MASKALL		0x0101035F
#define SPI_INTLVL_1		0x000001FFu
#define SPI_INTLVL_0		0x00000000u

/* SPIDAT1 (upper 16 bit defines) */
#define SPIDAT1_CSHOLD_MASK	BIT(12)

/* SPIGCR1 */
#define SPIGCR1_CLKMOD_MASK	BIT(1)
#define SPIGCR1_MASTER_MASK	BIT(0)
#define SPIGCR1_LOOPBACK_MASK	BIT(16)
#define SPIGCR1_SPIENA_MASK	BIT(24)

/* SPIBUF */
#define SPIBUF_TXFULL_MASK	BIT(29)
#define SPIBUF_RXEMPTY_MASK	BIT(31)

/* SPIDELAY: inter-transaction delay fields (version 2 hardware only) */
#define SPIDELAY_C2TDELAY_SHIFT	24
#define SPIDELAY_C2TDELAY_MASK	(0xFF << SPIDELAY_C2TDELAY_SHIFT)
#define SPIDELAY_T2CDELAY_SHIFT	16
#define SPIDELAY_T2CDELAY_MASK	(0xFF << SPIDELAY_T2CDELAY_SHIFT)
#define SPIDELAY_T2EDELAY_SHIFT	8
#define SPIDELAY_T2EDELAY_MASK	(0xFF << SPIDELAY_T2EDELAY_SHIFT)
#define SPIDELAY_C2EDELAY_SHIFT	0
#define SPIDELAY_C2EDELAY_MASK	0xFF

/* Error Masks (SPIFLG status register) */
#define SPIFLG_DLEN_ERR_MASK		BIT(0)
#define SPIFLG_TIMEOUT_MASK		BIT(1)
#define SPIFLG_PARERR_MASK		BIT(2)
#define SPIFLG_DESYNC_MASK		BIT(3)
#define SPIFLG_BITERR_MASK		BIT(4)
#define SPIFLG_OVRRUN_MASK		BIT(6)
#define SPIFLG_RX_INTR_MASK		BIT(8)
#define SPIFLG_TX_INTR_MASK		BIT(9)
#define SPIFLG_BUF_INIT_ACTIVE_MASK	BIT(24)

/* SPIINT interrupt/DMA-request enable bits */
#define SPIINT_BITERR_INTR	BIT(4)
#define SPIINT_OVRRUN_INTR	BIT(6)
#define SPIINT_RX_INTR		BIT(8)
#define SPIINT_TX_INTR		BIT(9)
#define SPIINT_DMA_REQ_EN	BIT(16)

/* SPI Controller register offsets */
#define SPIGCR0		0x00
#define SPIGCR1		0x04
#define SPIINT		0x08
#define SPILVL		0x0c
#define SPIFLG		0x10
#define SPIPC0		0x14
#define SPIDAT1		0x3c
#define SPIBUF		0x40
#define SPIDELAY	0x48
#define SPIDEF		0x4c
#define SPIFMT0		0x50
/* Per-chipselect state, indexed by spi->chip_select. */
struct davinci_spi_slave {
	u32	cmd_to_write;		/* cleared in davinci_spi_setup(); presumably a cached SPIDAT1 command word — TODO confirm */
	u32	clk_ctrl_to_write;	/* not referenced in this file's visible code */
	u32	bytes_per_word;		/* 1 or 2, derived from bits_per_word in setup_transfer() */
	u8	active_cs;
};
127
/* We have 2 DMA channels per CS, one for RX and one for TX */
struct davinci_spi_dma {
	int			dma_tx_channel;		/* EDMA channel number, -1 = unallocated */
	int			dma_rx_channel;		/* EDMA channel number, -1 = unallocated */
	int			dma_tx_sync_dev;	/* EDMA sync event for TX requests */
	int			dma_rx_sync_dev;	/* EDMA sync event for RX requests */
	enum dma_event_q	eventq;			/* EDMA event queue the channels use */

	/* signalled from the EDMA completion callbacks */
	struct completion	dma_tx_completion;
	struct completion	dma_rx_completion;
};
139
/* SPI Controller driver's private data. */
struct davinci_spi {
	struct spi_bitbang	bitbang;	/* spi_bitbang framework state */
	struct clk		*clk;		/* functional clock; its rate feeds the prescaler */

	u8			version;	/* hardware revision, compared against SPI_VERSION_2 */
	resource_size_t		pbase;		/* physical register base, used to build EDMA addresses */
	void __iomem		*base;		/* ioremapped register base */
	size_t			region_size;
	u32			irq;
	struct completion	done;

	const void		*tx;		/* TX cursor, advanced by get_tx() */
	void			*rx;		/* RX cursor, advanced by get_rx() */
	u8			*tmp_buf;	/* dummy TX source for RX-only DMA transfers */
	int			count;		/* word count of the current transfer */
	struct davinci_spi_dma	*dma_channels;	/* per-CS DMA state; may be NULL (PIO only) */
	struct davinci_spi_platform_data *pdata;

	/* FIFO accessors, chosen per word size in davinci_spi_setup_transfer() */
	void			(*get_rx)(u32 rx_data, struct davinci_spi *);
	u32			(*get_tx)(struct davinci_spi *);

	struct davinci_spi_slave slave[SPI_MAX_CHIPSELECT];
};
164
/* Fallback config used when a slave device supplies no controller_data. */
static struct davinci_spi_config davinci_spi_default_cfg;

/* Module-wide switch: non-zero selects the EDMA transfer path. */
static unsigned use_dma;
168
169 static void davinci_spi_rx_buf_u8(u32 data, struct davinci_spi *davinci_spi)
170 {
171         u8 *rx = davinci_spi->rx;
172
173         *rx++ = (u8)data;
174         davinci_spi->rx = rx;
175 }
176
177 static void davinci_spi_rx_buf_u16(u32 data, struct davinci_spi *davinci_spi)
178 {
179         u16 *rx = davinci_spi->rx;
180
181         *rx++ = (u16)data;
182         davinci_spi->rx = rx;
183 }
184
185 static u32 davinci_spi_tx_buf_u8(struct davinci_spi *davinci_spi)
186 {
187         u32 data;
188         const u8 *tx = davinci_spi->tx;
189
190         data = *tx++;
191         davinci_spi->tx = tx;
192         return data;
193 }
194
195 static u32 davinci_spi_tx_buf_u16(struct davinci_spi *davinci_spi)
196 {
197         u32 data;
198         const u16 *tx = davinci_spi->tx;
199
200         data = *tx++;
201         davinci_spi->tx = tx;
202         return data;
203 }
204
205 static inline void set_io_bits(void __iomem *addr, u32 bits)
206 {
207         u32 v = ioread32(addr);
208
209         v |= bits;
210         iowrite32(v, addr);
211 }
212
213 static inline void clear_io_bits(void __iomem *addr, u32 bits)
214 {
215         u32 v = ioread32(addr);
216
217         v &= ~bits;
218         iowrite32(v, addr);
219 }
220
221 static void davinci_spi_set_dma_req(const struct spi_device *spi, int enable)
222 {
223         struct davinci_spi *davinci_spi = spi_master_get_devdata(spi->master);
224
225         if (enable)
226                 set_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN);
227         else
228                 clear_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN);
229 }
230
231 /*
232  * Interface to control the chip select signal
233  */
234 static void davinci_spi_chipselect(struct spi_device *spi, int value)
235 {
236         struct davinci_spi *davinci_spi;
237         struct davinci_spi_platform_data *pdata;
238         u8 chip_sel = spi->chip_select;
239         u16 spidat1_cfg = CS_DEFAULT;
240         bool gpio_chipsel = false;
241
242         davinci_spi = spi_master_get_devdata(spi->master);
243         pdata = davinci_spi->pdata;
244
245         if (pdata->chip_sel && chip_sel < pdata->num_chipselect &&
246                                 pdata->chip_sel[chip_sel] != SPI_INTERN_CS)
247                 gpio_chipsel = true;
248
249         /*
250          * Board specific chip select logic decides the polarity and cs
251          * line for the controller
252          */
253         if (gpio_chipsel) {
254                 if (value == BITBANG_CS_ACTIVE)
255                         gpio_set_value(pdata->chip_sel[chip_sel], 0);
256                 else
257                         gpio_set_value(pdata->chip_sel[chip_sel], 1);
258         } else {
259                 if (value == BITBANG_CS_ACTIVE) {
260                         spidat1_cfg |= SPIDAT1_CSHOLD_MASK;
261                         spidat1_cfg &= ~(0x1 << chip_sel);
262                 }
263
264                 iowrite16(spidat1_cfg, davinci_spi->base + SPIDAT1 + 2);
265         }
266 }
267
268 /**
269  * davinci_spi_get_prescale - Calculates the correct prescale value
270  * @maxspeed_hz: the maximum rate the SPI clock can run at
271  *
272  * This function calculates the prescale value that generates a clock rate
273  * less than or equal to the specified maximum.
274  *
275  * Returns: calculated prescale - 1 for easy programming into SPI registers
276  * or negative error number if valid prescalar cannot be updated.
277  */
278 static inline int davinci_spi_get_prescale(struct davinci_spi *davinci_spi,
279                                                         u32 max_speed_hz)
280 {
281         int ret;
282
283         ret = DIV_ROUND_UP(clk_get_rate(davinci_spi->clk), max_speed_hz);
284
285         if (ret < 3 || ret > 256)
286                 return -EINVAL;
287
288         return ret - 1;
289 }
290
291 /**
292  * davinci_spi_setup_transfer - This functions will determine transfer method
293  * @spi: spi device on which data transfer to be done
294  * @t: spi transfer in which transfer info is filled
295  *
296  * This function determines data transfer method (8/16/32 bit transfer).
297  * It will also set the SPI Clock Control register according to
298  * SPI slave device freq.
299  */
300 static int davinci_spi_setup_transfer(struct spi_device *spi,
301                 struct spi_transfer *t)
302 {
303
304         struct davinci_spi *davinci_spi;
305         struct davinci_spi_config *spicfg;
306         u8 bits_per_word = 0;
307         u32 hz = 0, spifmt = 0, prescale = 0;
308
309         davinci_spi = spi_master_get_devdata(spi->master);
310         spicfg = (struct davinci_spi_config *)spi->controller_data;
311         if (!spicfg)
312                 spicfg = &davinci_spi_default_cfg;
313
314         if (t) {
315                 bits_per_word = t->bits_per_word;
316                 hz = t->speed_hz;
317         }
318
319         /* if bits_per_word is not set then set it default */
320         if (!bits_per_word)
321                 bits_per_word = spi->bits_per_word;
322
323         /*
324          * Assign function pointer to appropriate transfer method
325          * 8bit, 16bit or 32bit transfer
326          */
327         if (bits_per_word <= 8 && bits_per_word >= 2) {
328                 davinci_spi->get_rx = davinci_spi_rx_buf_u8;
329                 davinci_spi->get_tx = davinci_spi_tx_buf_u8;
330                 davinci_spi->slave[spi->chip_select].bytes_per_word = 1;
331         } else if (bits_per_word <= 16 && bits_per_word >= 2) {
332                 davinci_spi->get_rx = davinci_spi_rx_buf_u16;
333                 davinci_spi->get_tx = davinci_spi_tx_buf_u16;
334                 davinci_spi->slave[spi->chip_select].bytes_per_word = 2;
335         } else
336                 return -EINVAL;
337
338         if (!hz)
339                 hz = spi->max_speed_hz;
340
341         /* Set up SPIFMTn register, unique to this chipselect. */
342
343         prescale = davinci_spi_get_prescale(davinci_spi, hz);
344         if (prescale < 0)
345                 return prescale;
346
347         spifmt = (prescale << SPIFMT_PRESCALE_SHIFT) | (bits_per_word & 0x1f);
348
349         if (spi->mode & SPI_LSB_FIRST)
350                 spifmt |= SPIFMT_SHIFTDIR_MASK;
351
352         if (spi->mode & SPI_CPOL)
353                 spifmt |= SPIFMT_POLARITY_MASK;
354
355         if (!(spi->mode & SPI_CPHA))
356                 spifmt |= SPIFMT_PHASE_MASK;
357
358         /*
359          * Version 1 hardware supports two basic SPI modes:
360          *  - Standard SPI mode uses 4 pins, with chipselect
361          *  - 3 pin SPI is a 4 pin variant without CS (SPI_NO_CS)
362          *      (distinct from SPI_3WIRE, with just one data wire;
363          *      or similar variants without MOSI or without MISO)
364          *
365          * Version 2 hardware supports an optional handshaking signal,
366          * so it can support two more modes:
367          *  - 5 pin SPI variant is standard SPI plus SPI_READY
368          *  - 4 pin with enable is (SPI_READY | SPI_NO_CS)
369          */
370
371         if (davinci_spi->version == SPI_VERSION_2) {
372
373                 u32 delay = 0;
374
375                 spifmt |= ((spicfg->wdelay << SPIFMT_WDELAY_SHIFT)
376                                                         & SPIFMT_WDELAY_MASK);
377
378                 if (spicfg->odd_parity)
379                         spifmt |= SPIFMT_ODD_PARITY_MASK;
380
381                 if (spicfg->parity_enable)
382                         spifmt |= SPIFMT_PARITYENA_MASK;
383
384                 if (spicfg->timer_disable) {
385                         spifmt |= SPIFMT_DISTIMER_MASK;
386                 } else {
387                         delay |= (spicfg->c2tdelay << SPIDELAY_C2TDELAY_SHIFT)
388                                                 & SPIDELAY_C2TDELAY_MASK;
389                         delay |= (spicfg->t2cdelay << SPIDELAY_T2CDELAY_SHIFT)
390                                                 & SPIDELAY_T2CDELAY_MASK;
391                 }
392
393                 if (spi->mode & SPI_READY) {
394                         spifmt |= SPIFMT_WAITENA_MASK;
395                         delay |= (spicfg->t2edelay << SPIDELAY_T2EDELAY_SHIFT)
396                                                 & SPIDELAY_T2EDELAY_MASK;
397                         delay |= (spicfg->c2edelay << SPIDELAY_C2EDELAY_SHIFT)
398                                                 & SPIDELAY_C2EDELAY_MASK;
399                 }
400
401                 iowrite32(delay, davinci_spi->base + SPIDELAY);
402         }
403
404         iowrite32(spifmt, davinci_spi->base + SPIFMT0);
405
406         return 0;
407 }
408
409 static void davinci_spi_dma_rx_callback(unsigned lch, u16 ch_status, void *data)
410 {
411         struct spi_device *spi = (struct spi_device *)data;
412         struct davinci_spi *davinci_spi;
413         struct davinci_spi_dma *davinci_spi_dma;
414
415         davinci_spi = spi_master_get_devdata(spi->master);
416         davinci_spi_dma = &(davinci_spi->dma_channels[spi->chip_select]);
417
418         if (ch_status == DMA_COMPLETE)
419                 edma_stop(davinci_spi_dma->dma_rx_channel);
420         else
421                 edma_clean_channel(davinci_spi_dma->dma_rx_channel);
422
423         complete(&davinci_spi_dma->dma_rx_completion);
424         /* We must disable the DMA RX request */
425         davinci_spi_set_dma_req(spi, 0);
426 }
427
428 static void davinci_spi_dma_tx_callback(unsigned lch, u16 ch_status, void *data)
429 {
430         struct spi_device *spi = (struct spi_device *)data;
431         struct davinci_spi *davinci_spi;
432         struct davinci_spi_dma *davinci_spi_dma;
433
434         davinci_spi = spi_master_get_devdata(spi->master);
435         davinci_spi_dma = &(davinci_spi->dma_channels[spi->chip_select]);
436
437         if (ch_status == DMA_COMPLETE)
438                 edma_stop(davinci_spi_dma->dma_tx_channel);
439         else
440                 edma_clean_channel(davinci_spi_dma->dma_tx_channel);
441
442         complete(&davinci_spi_dma->dma_tx_completion);
443         /* We must disable the DMA TX request */
444         davinci_spi_set_dma_req(spi, 0);
445 }
446
447 static int davinci_spi_request_dma(struct spi_device *spi)
448 {
449         struct davinci_spi *davinci_spi;
450         struct davinci_spi_dma *davinci_spi_dma;
451         struct device *sdev;
452         int r;
453
454         davinci_spi = spi_master_get_devdata(spi->master);
455         davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
456         sdev = davinci_spi->bitbang.master->dev.parent;
457
458         r = edma_alloc_channel(davinci_spi_dma->dma_rx_sync_dev,
459                                 davinci_spi_dma_rx_callback, spi,
460                                 davinci_spi_dma->eventq);
461         if (r < 0) {
462                 dev_dbg(sdev, "Unable to request DMA channel for SPI RX\n");
463                 return -EAGAIN;
464         }
465         davinci_spi_dma->dma_rx_channel = r;
466         r = edma_alloc_channel(davinci_spi_dma->dma_tx_sync_dev,
467                                 davinci_spi_dma_tx_callback, spi,
468                                 davinci_spi_dma->eventq);
469         if (r < 0) {
470                 edma_free_channel(davinci_spi_dma->dma_rx_channel);
471                 davinci_spi_dma->dma_rx_channel = -1;
472                 dev_dbg(sdev, "Unable to request DMA channel for SPI TX\n");
473                 return -EAGAIN;
474         }
475         davinci_spi_dma->dma_tx_channel = r;
476
477         return 0;
478 }
479
480 /**
481  * davinci_spi_setup - This functions will set default transfer method
482  * @spi: spi device on which data transfer to be done
483  *
484  * This functions sets the default transfer method.
485  */
486 static int davinci_spi_setup(struct spi_device *spi)
487 {
488         int retval;
489         struct davinci_spi *davinci_spi;
490         struct davinci_spi_dma *davinci_spi_dma;
491
492         davinci_spi = spi_master_get_devdata(spi->master);
493
494         /* if bits per word length is zero then set it default 8 */
495         if (!spi->bits_per_word)
496                 spi->bits_per_word = 8;
497
498         davinci_spi->slave[spi->chip_select].cmd_to_write = 0;
499
500         if (use_dma && davinci_spi->dma_channels) {
501                 davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
502
503                 if ((davinci_spi_dma->dma_rx_channel == -1)
504                                 || (davinci_spi_dma->dma_tx_channel == -1)) {
505                         retval = davinci_spi_request_dma(spi);
506                         if (retval < 0)
507                                 return retval;
508                 }
509         }
510
511         retval = davinci_spi_setup_transfer(spi, NULL);
512
513         return retval;
514 }
515
516 static void davinci_spi_cleanup(struct spi_device *spi)
517 {
518         struct davinci_spi *davinci_spi = spi_master_get_devdata(spi->master);
519         struct davinci_spi_dma *davinci_spi_dma;
520
521         davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
522
523         if (use_dma && davinci_spi->dma_channels) {
524                 davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
525
526                 if ((davinci_spi_dma->dma_rx_channel != -1)
527                                 && (davinci_spi_dma->dma_tx_channel != -1)) {
528                         edma_free_channel(davinci_spi_dma->dma_tx_channel);
529                         edma_free_channel(davinci_spi_dma->dma_rx_channel);
530                 }
531         }
532 }
533
534 static int davinci_spi_bufs_prep(struct spi_device *spi,
535                                  struct davinci_spi *davinci_spi)
536 {
537         struct davinci_spi_platform_data *pdata;
538         int op_mode = 0;
539
540         /*
541          * REVISIT  unless devices disagree about SPI_LOOP or
542          * SPI_READY (SPI_NO_CS only allows one device!), this
543          * should not need to be done before each message...
544          * optimize for both flags staying cleared.
545          */
546
547         op_mode = SPIPC0_DIFUN_MASK
548                 | SPIPC0_DOFUN_MASK
549                 | SPIPC0_CLKFUN_MASK;
550         if (!(spi->mode & SPI_NO_CS)) {
551                 pdata = davinci_spi->pdata;
552                 if (!pdata->chip_sel ||
553                      pdata->chip_sel[spi->chip_select] == SPI_INTERN_CS)
554                         op_mode |= 1 << spi->chip_select;
555         }
556         if (spi->mode & SPI_READY)
557                 op_mode |= SPIPC0_SPIENA_MASK;
558
559         iowrite32(op_mode, davinci_spi->base + SPIPC0);
560
561         if (spi->mode & SPI_LOOP)
562                 set_io_bits(davinci_spi->base + SPIGCR1,
563                                 SPIGCR1_LOOPBACK_MASK);
564         else
565                 clear_io_bits(davinci_spi->base + SPIGCR1,
566                                 SPIGCR1_LOOPBACK_MASK);
567
568         return 0;
569 }
570
/*
 * davinci_spi_check_error - translate SPIFLG error bits into an errno
 * @davinci_spi: driver private data (hardware version, parent device)
 * @int_status: snapshot of the SPIFLG register
 *
 * Returns 0 when no error bit is set, otherwise a negative errno for the
 * first error found.  The additional status bits are only meaningful on
 * version 2 hardware and are skipped otherwise.
 */
static int davinci_spi_check_error(struct davinci_spi *davinci_spi,
				   int int_status)
{
	struct device *sdev = davinci_spi->bitbang.master->dev.parent;

	if (int_status & SPIFLG_TIMEOUT_MASK) {
		dev_dbg(sdev, "SPI Time-out Error\n");
		return -ETIMEDOUT;
	}
	if (int_status & SPIFLG_DESYNC_MASK) {
		dev_dbg(sdev, "SPI Desynchronization Error\n");
		return -EIO;
	}
	if (int_status & SPIFLG_BITERR_MASK) {
		dev_dbg(sdev, "SPI Bit error\n");
		return -EIO;
	}

	/* the remaining flag bits only exist on version 2 hardware */
	if (davinci_spi->version == SPI_VERSION_2) {
		if (int_status & SPIFLG_DLEN_ERR_MASK) {
			dev_dbg(sdev, "SPI Data Length Error\n");
			return -EIO;
		}
		if (int_status & SPIFLG_PARERR_MASK) {
			dev_dbg(sdev, "SPI Parity Error\n");
			return -EIO;
		}
		if (int_status & SPIFLG_OVRRUN_MASK) {
			dev_dbg(sdev, "SPI Data Overrun error\n");
			return -EIO;
		}
		if (int_status & SPIFLG_TX_INTR_MASK) {
			dev_dbg(sdev, "SPI TX intr bit set\n");
			return -EIO;
		}
		if (int_status & SPIFLG_BUF_INIT_ACTIVE_MASK) {
			dev_dbg(sdev, "SPI Buffer Init Active\n");
			return -EBUSY;
		}
	}

	return 0;
}
614
/**
 * davinci_spi_bufs_pio - handle a transfer in PIO mode
 * @spi: spi device on which data transfer to be done
 * @t: spi transfer in which transfer info is filled
 *
 * This function feeds the TX FIFO and drains the RX FIFO by polling the
 * SPIBUF status bits (or, for RX-only transfers without poll_mode, by
 * polling SPIINT between interrupt-driven words).  Returns the number of
 * bytes transferred (t->len) on success, or a negative errno from the
 * SPIFLG error check.
 */
static int davinci_spi_bufs_pio(struct spi_device *spi, struct spi_transfer *t)
{
	struct davinci_spi *davinci_spi;
	int int_status, count, ret;
	u8 conv;
	u32 tx_data, data1_reg_val;
	u32 buf_val, flg_val;
	struct davinci_spi_platform_data *pdata;

	davinci_spi = spi_master_get_devdata(spi->master);
	pdata = davinci_spi->pdata;

	davinci_spi->tx = t->tx_buf;
	davinci_spi->rx = t->rx_buf;

	/* convert len to words based on bits_per_word */
	conv = davinci_spi->slave[spi->chip_select].bytes_per_word;
	davinci_spi->count = t->len / conv;

	/* keep the CS configuration in SPIDAT1's upper half intact */
	data1_reg_val = ioread32(davinci_spi->base + SPIDAT1);

	INIT_COMPLETION(davinci_spi->done);

	ret = davinci_spi_bufs_prep(spi, davinci_spi);
	if (ret)
		return ret;

	/* Enable SPI */
	set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);

	count = davinci_spi->count;

	/* Determine the command to execute READ or WRITE */
	if (t->tx_buf) {
		clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL);

		while (1) {
			tx_data = davinci_spi->get_tx(davinci_spi);

			/* only the low 16 bits of SPIDAT1 carry data */
			data1_reg_val &= ~(0xFFFF);
			data1_reg_val |= (0xFFFF & tx_data);

			buf_val = ioread32(davinci_spi->base + SPIBUF);
			if ((buf_val & SPIBUF_TXFULL_MASK) == 0) {
				iowrite32(data1_reg_val,
						davinci_spi->base + SPIDAT1);

				count--;
			}
			/* wait until the shifted-in word lands in SPIBUF */
			while (ioread32(davinci_spi->base + SPIBUF)
					& SPIBUF_RXEMPTY_MASK)
				cpu_relax();

			/* getting the returned byte */
			if (t->rx_buf) {
				buf_val = ioread32(davinci_spi->base + SPIBUF);
				davinci_spi->get_rx(buf_val, davinci_spi);
			}
			if (count <= 0)
				break;
		}
	} else {
		if (pdata->poll_mode) {
			while (1) {
				/* keeps the serial clock going */
				if ((ioread32(davinci_spi->base + SPIBUF)
						& SPIBUF_TXFULL_MASK) == 0)
					iowrite32(data1_reg_val,
						davinci_spi->base + SPIDAT1);

				while (ioread32(davinci_spi->base + SPIBUF) &
						SPIBUF_RXEMPTY_MASK)
					cpu_relax();

				/* NOTE(review): flg_val is read but never used */
				flg_val = ioread32(davinci_spi->base + SPIFLG);
				buf_val = ioread32(davinci_spi->base + SPIBUF);

				davinci_spi->get_rx(buf_val, davinci_spi);

				count--;
				if (count <= 0)
					break;
			}
		} else {	/* Receive in Interrupt mode */
			int i;

			for (i = 0; i < davinci_spi->count; i++) {
				set_io_bits(davinci_spi->base + SPIINT,
						SPIINT_BITERR_INTR
						| SPIINT_OVRRUN_INTR
						| SPIINT_RX_INTR);

				iowrite32(data1_reg_val,
						davinci_spi->base + SPIDAT1);

				/* the IRQ handler clears SPIINT_RX_INTR */
				while (ioread32(davinci_spi->base + SPIINT) &
						SPIINT_RX_INTR)
					cpu_relax();
			}
			/* drop CSHOLD and data bits when the transfer ends */
			iowrite32((data1_reg_val & 0x0ffcffff),
					davinci_spi->base + SPIDAT1);
		}
	}

	/*
	 * Check for bit error, desync error,parity error,timeout error and
	 * receive overflow errors
	 */
	int_status = ioread32(davinci_spi->base + SPIFLG);

	ret = davinci_spi_check_error(davinci_spi, int_status);
	if (ret != 0)
		return ret;

	/* SPI Framework maintains the count only in bytes so convert back */
	davinci_spi->count *= conv;

	return t->len;
}
743
/*
 * NOTE(review): these three macros duplicate identical definitions near the
 * top of the file.  The redefinitions are benign (same replacement list per
 * C's redefinition rule) but one copy should be removed.
 */
#define DAVINCI_DMA_DATA_TYPE_S8	0x01
#define DAVINCI_DMA_DATA_TYPE_S16	0x02
#define DAVINCI_DMA_DATA_TYPE_S32	0x04
747
748 static int davinci_spi_bufs_dma(struct spi_device *spi, struct spi_transfer *t)
749 {
750         struct davinci_spi *davinci_spi;
751         int int_status = 0;
752         int count, temp_count;
753         u8 conv = 1;
754         u32 data1_reg_val;
755         struct davinci_spi_dma *davinci_spi_dma;
756         int word_len, data_type, ret;
757         unsigned long tx_reg, rx_reg;
758         struct device *sdev;
759
760         davinci_spi = spi_master_get_devdata(spi->master);
761         sdev = davinci_spi->bitbang.master->dev.parent;
762
763         davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
764
765         tx_reg = (unsigned long)davinci_spi->pbase + SPIDAT1;
766         rx_reg = (unsigned long)davinci_spi->pbase + SPIBUF;
767
768         davinci_spi->tx = t->tx_buf;
769         davinci_spi->rx = t->rx_buf;
770
771         /* convert len to words based on bits_per_word */
772         conv = davinci_spi->slave[spi->chip_select].bytes_per_word;
773         davinci_spi->count = t->len / conv;
774
775         data1_reg_val = ioread32(davinci_spi->base + SPIDAT1);
776
777         INIT_COMPLETION(davinci_spi->done);
778
779         init_completion(&davinci_spi_dma->dma_rx_completion);
780         init_completion(&davinci_spi_dma->dma_tx_completion);
781
782         word_len = conv * 8;
783
784         if (word_len <= 8)
785                 data_type = DAVINCI_DMA_DATA_TYPE_S8;
786         else if (word_len <= 16)
787                 data_type = DAVINCI_DMA_DATA_TYPE_S16;
788         else if (word_len <= 32)
789                 data_type = DAVINCI_DMA_DATA_TYPE_S32;
790         else
791                 return -EINVAL;
792
793         ret = davinci_spi_bufs_prep(spi, davinci_spi);
794         if (ret)
795                 return ret;
796
797         count = davinci_spi->count;     /* the number of elements */
798
799         /* disable all interrupts for dma transfers */
800         clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL);
801         /* Disable SPI to write configuration bits in SPIDAT */
802         clear_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
803         /* Enable SPI */
804         set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
805
806         if (t->tx_buf) {
807                 t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf, count,
808                                 DMA_TO_DEVICE);
809                 if (dma_mapping_error(&spi->dev, t->tx_dma)) {
810                         dev_dbg(sdev, "Unable to DMA map a %d bytes"
811                                 " TX buffer\n", count);
812                         return -ENOMEM;
813                 }
814                 temp_count = count;
815         } else {
816                 /* We need TX clocking for RX transaction */
817                 t->tx_dma = dma_map_single(&spi->dev,
818                                 (void *)davinci_spi->tmp_buf, count + 1,
819                                 DMA_TO_DEVICE);
820                 if (dma_mapping_error(&spi->dev, t->tx_dma)) {
821                         dev_dbg(sdev, "Unable to DMA map a %d bytes"
822                                 " TX tmp buffer\n", count);
823                         return -ENOMEM;
824                 }
825                 temp_count = count + 1;
826         }
827
828         edma_set_transfer_params(davinci_spi_dma->dma_tx_channel,
829                                         data_type, temp_count, 1, 0, ASYNC);
830         edma_set_dest(davinci_spi_dma->dma_tx_channel, tx_reg, INCR, W8BIT);
831         edma_set_src(davinci_spi_dma->dma_tx_channel, t->tx_dma, INCR, W8BIT);
832         edma_set_src_index(davinci_spi_dma->dma_tx_channel, data_type, 0);
833         edma_set_dest_index(davinci_spi_dma->dma_tx_channel, 0, 0);
834
835         if (t->rx_buf) {
836                 /* initiate transaction */
837                 iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1);
838
839                 t->rx_dma = dma_map_single(&spi->dev, (void *)t->rx_buf, count,
840                                 DMA_FROM_DEVICE);
841                 if (dma_mapping_error(&spi->dev, t->rx_dma)) {
842                         dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n",
843                                         count);
844                         if (t->tx_buf != NULL)
845                                 dma_unmap_single(NULL, t->tx_dma,
846                                                  count, DMA_TO_DEVICE);
847                         return -ENOMEM;
848                 }
849                 edma_set_transfer_params(davinci_spi_dma->dma_rx_channel,
850                                 data_type, count, 1, 0, ASYNC);
851                 edma_set_src(davinci_spi_dma->dma_rx_channel,
852                                 rx_reg, INCR, W8BIT);
853                 edma_set_dest(davinci_spi_dma->dma_rx_channel,
854                                 t->rx_dma, INCR, W8BIT);
855                 edma_set_src_index(davinci_spi_dma->dma_rx_channel, 0, 0);
856                 edma_set_dest_index(davinci_spi_dma->dma_rx_channel,
857                                 data_type, 0);
858         }
859
860         if ((t->tx_buf) || (t->rx_buf))
861                 edma_start(davinci_spi_dma->dma_tx_channel);
862
863         if (t->rx_buf)
864                 edma_start(davinci_spi_dma->dma_rx_channel);
865
866         if ((t->rx_buf) || (t->tx_buf))
867                 davinci_spi_set_dma_req(spi, 1);
868
869         if (t->tx_buf)
870                 wait_for_completion_interruptible(
871                                 &davinci_spi_dma->dma_tx_completion);
872
873         if (t->rx_buf)
874                 wait_for_completion_interruptible(
875                                 &davinci_spi_dma->dma_rx_completion);
876
877         dma_unmap_single(NULL, t->tx_dma, temp_count, DMA_TO_DEVICE);
878
879         if (t->rx_buf)
880                 dma_unmap_single(NULL, t->rx_dma, count, DMA_FROM_DEVICE);
881
882         /*
883          * Check for bit error, desync error,parity error,timeout error and
884          * receive overflow errors
885          */
886         int_status = ioread32(davinci_spi->base + SPIFLG);
887
888         ret = davinci_spi_check_error(davinci_spi, int_status);
889         if (ret != 0)
890                 return ret;
891
892         /* SPI Framework maintains the count only in bytes so convert back */
893         davinci_spi->count *= conv;
894
895         return t->len;
896 }
897
898 /**
899  * davinci_spi_irq - IRQ handler for DaVinci SPI
900  * @irq: IRQ number for this SPI Master
901  * @context_data: structure for SPI Master controller davinci_spi
902  */
903 static irqreturn_t davinci_spi_irq(s32 irq, void *context_data)
904 {
905         struct davinci_spi *davinci_spi = context_data;
906         u32 int_status, rx_data = 0;
907         irqreturn_t ret = IRQ_NONE;
908
909         int_status = ioread32(davinci_spi->base + SPIFLG);
910
911         while ((int_status & SPIFLG_RX_INTR_MASK)) {
912                 if (likely(int_status & SPIFLG_RX_INTR_MASK)) {
913                         ret = IRQ_HANDLED;
914
915                         rx_data = ioread32(davinci_spi->base + SPIBUF);
916                         davinci_spi->get_rx(rx_data, davinci_spi);
917
918                         /* Disable Receive Interrupt */
919                         iowrite32(~(SPIINT_RX_INTR | SPIINT_TX_INTR),
920                                         davinci_spi->base + SPIINT);
921                 } else
922                         (void)davinci_spi_check_error(davinci_spi, int_status);
923
924                 int_status = ioread32(davinci_spi->base + SPIFLG);
925         }
926
927         return ret;
928 }
929
930 /**
931  * davinci_spi_probe - probe function for SPI Master Controller
932  * @pdev: platform_device structure which contains plateform specific data
933  */
934 static int davinci_spi_probe(struct platform_device *pdev)
935 {
936         struct spi_master *master;
937         struct davinci_spi *davinci_spi;
938         struct davinci_spi_platform_data *pdata;
939         struct resource *r, *mem;
940         resource_size_t dma_rx_chan = SPI_NO_RESOURCE;
941         resource_size_t dma_tx_chan = SPI_NO_RESOURCE;
942         resource_size_t dma_eventq = SPI_NO_RESOURCE;
943         int i = 0, ret = 0;
944
945         pdata = pdev->dev.platform_data;
946         if (pdata == NULL) {
947                 ret = -ENODEV;
948                 goto err;
949         }
950
951         master = spi_alloc_master(&pdev->dev, sizeof(struct davinci_spi));
952         if (master == NULL) {
953                 ret = -ENOMEM;
954                 goto err;
955         }
956
957         dev_set_drvdata(&pdev->dev, master);
958
959         davinci_spi = spi_master_get_devdata(master);
960         if (davinci_spi == NULL) {
961                 ret = -ENOENT;
962                 goto free_master;
963         }
964
965         r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
966         if (r == NULL) {
967                 ret = -ENOENT;
968                 goto free_master;
969         }
970
971         davinci_spi->pbase = r->start;
972         davinci_spi->region_size = resource_size(r);
973         davinci_spi->pdata = pdata;
974
975         mem = request_mem_region(r->start, davinci_spi->region_size,
976                                         pdev->name);
977         if (mem == NULL) {
978                 ret = -EBUSY;
979                 goto free_master;
980         }
981
982         davinci_spi->base = ioremap(r->start, davinci_spi->region_size);
983         if (davinci_spi->base == NULL) {
984                 ret = -ENOMEM;
985                 goto release_region;
986         }
987
988         davinci_spi->irq = platform_get_irq(pdev, 0);
989         if (davinci_spi->irq <= 0) {
990                 ret = -EINVAL;
991                 goto unmap_io;
992         }
993
994         ret = request_irq(davinci_spi->irq, davinci_spi_irq, IRQF_DISABLED,
995                           dev_name(&pdev->dev), davinci_spi);
996         if (ret)
997                 goto unmap_io;
998
999         /* Allocate tmp_buf for tx_buf */
1000         davinci_spi->tmp_buf = kzalloc(SPI_BUFSIZ, GFP_KERNEL);
1001         if (davinci_spi->tmp_buf == NULL) {
1002                 ret = -ENOMEM;
1003                 goto irq_free;
1004         }
1005
1006         davinci_spi->bitbang.master = spi_master_get(master);
1007         if (davinci_spi->bitbang.master == NULL) {
1008                 ret = -ENODEV;
1009                 goto free_tmp_buf;
1010         }
1011
1012         davinci_spi->clk = clk_get(&pdev->dev, NULL);
1013         if (IS_ERR(davinci_spi->clk)) {
1014                 ret = -ENODEV;
1015                 goto put_master;
1016         }
1017         clk_enable(davinci_spi->clk);
1018
1019         master->bus_num = pdev->id;
1020         master->num_chipselect = pdata->num_chipselect;
1021         master->setup = davinci_spi_setup;
1022         master->cleanup = davinci_spi_cleanup;
1023
1024         davinci_spi->bitbang.chipselect = davinci_spi_chipselect;
1025         davinci_spi->bitbang.setup_transfer = davinci_spi_setup_transfer;
1026
1027         davinci_spi->version = pdata->version;
1028         use_dma = pdata->use_dma;
1029
1030         davinci_spi->bitbang.flags = SPI_NO_CS | SPI_LSB_FIRST | SPI_LOOP;
1031         if (davinci_spi->version == SPI_VERSION_2)
1032                 davinci_spi->bitbang.flags |= SPI_READY;
1033
1034         if (use_dma) {
1035                 r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1036                 if (r)
1037                         dma_rx_chan = r->start;
1038                 r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1039                 if (r)
1040                         dma_tx_chan = r->start;
1041                 r = platform_get_resource(pdev, IORESOURCE_DMA, 2);
1042                 if (r)
1043                         dma_eventq = r->start;
1044         }
1045
1046         if (!use_dma ||
1047             dma_rx_chan == SPI_NO_RESOURCE ||
1048             dma_tx_chan == SPI_NO_RESOURCE ||
1049             dma_eventq  == SPI_NO_RESOURCE) {
1050                 davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_pio;
1051                 use_dma = 0;
1052         } else {
1053                 davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_dma;
1054                 davinci_spi->dma_channels = kzalloc(master->num_chipselect
1055                                 * sizeof(struct davinci_spi_dma), GFP_KERNEL);
1056                 if (davinci_spi->dma_channels == NULL) {
1057                         ret = -ENOMEM;
1058                         goto free_clk;
1059                 }
1060
1061                 for (i = 0; i < master->num_chipselect; i++) {
1062                         davinci_spi->dma_channels[i].dma_rx_channel = -1;
1063                         davinci_spi->dma_channels[i].dma_rx_sync_dev =
1064                                 dma_rx_chan;
1065                         davinci_spi->dma_channels[i].dma_tx_channel = -1;
1066                         davinci_spi->dma_channels[i].dma_tx_sync_dev =
1067                                 dma_tx_chan;
1068                         davinci_spi->dma_channels[i].eventq = dma_eventq;
1069                 }
1070                 dev_info(&pdev->dev, "DaVinci SPI driver in EDMA mode\n"
1071                                 "Using RX channel = %d , TX channel = %d and "
1072                                 "event queue = %d", dma_rx_chan, dma_tx_chan,
1073                                 dma_eventq);
1074         }
1075
1076         davinci_spi->get_rx = davinci_spi_rx_buf_u8;
1077         davinci_spi->get_tx = davinci_spi_tx_buf_u8;
1078
1079         init_completion(&davinci_spi->done);
1080
1081         /* Reset In/OUT SPI module */
1082         iowrite32(0, davinci_spi->base + SPIGCR0);
1083         udelay(100);
1084         iowrite32(1, davinci_spi->base + SPIGCR0);
1085
1086         /* initialize chip selects */
1087         if (pdata->chip_sel) {
1088                 for (i = 0; i < pdata->num_chipselect; i++) {
1089                         if (pdata->chip_sel[i] != SPI_INTERN_CS)
1090                                 gpio_direction_output(pdata->chip_sel[i], 1);
1091                 }
1092         }
1093
1094         /* Clock internal */
1095         if (davinci_spi->pdata->clk_internal)
1096                 set_io_bits(davinci_spi->base + SPIGCR1,
1097                                 SPIGCR1_CLKMOD_MASK);
1098         else
1099                 clear_io_bits(davinci_spi->base + SPIGCR1,
1100                                 SPIGCR1_CLKMOD_MASK);
1101
1102         iowrite32(CS_DEFAULT, davinci_spi->base + SPIDEF);
1103
1104         /* master mode default */
1105         set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_MASTER_MASK);
1106
1107         if (davinci_spi->pdata->intr_level)
1108                 iowrite32(SPI_INTLVL_1, davinci_spi->base + SPILVL);
1109         else
1110                 iowrite32(SPI_INTLVL_0, davinci_spi->base + SPILVL);
1111
1112         ret = spi_bitbang_start(&davinci_spi->bitbang);
1113         if (ret)
1114                 goto free_clk;
1115
1116         dev_info(&pdev->dev, "Controller at 0x%p\n", davinci_spi->base);
1117
1118         if (!pdata->poll_mode)
1119                 dev_info(&pdev->dev, "Operating in interrupt mode"
1120                         " using IRQ %d\n", davinci_spi->irq);
1121
1122         return ret;
1123
1124 free_clk:
1125         clk_disable(davinci_spi->clk);
1126         clk_put(davinci_spi->clk);
1127 put_master:
1128         spi_master_put(master);
1129 free_tmp_buf:
1130         kfree(davinci_spi->tmp_buf);
1131 irq_free:
1132         free_irq(davinci_spi->irq, davinci_spi);
1133 unmap_io:
1134         iounmap(davinci_spi->base);
1135 release_region:
1136         release_mem_region(davinci_spi->pbase, davinci_spi->region_size);
1137 free_master:
1138         kfree(master);
1139 err:
1140         return ret;
1141 }
1142
1143 /**
1144  * davinci_spi_remove - remove function for SPI Master Controller
1145  * @pdev: platform_device structure which contains plateform specific data
1146  *
1147  * This function will do the reverse action of davinci_spi_probe function
1148  * It will free the IRQ and SPI controller's memory region.
1149  * It will also call spi_bitbang_stop to destroy the work queue which was
1150  * created by spi_bitbang_start.
1151  */
1152 static int __exit davinci_spi_remove(struct platform_device *pdev)
1153 {
1154         struct davinci_spi *davinci_spi;
1155         struct spi_master *master;
1156
1157         master = dev_get_drvdata(&pdev->dev);
1158         davinci_spi = spi_master_get_devdata(master);
1159
1160         spi_bitbang_stop(&davinci_spi->bitbang);
1161
1162         clk_disable(davinci_spi->clk);
1163         clk_put(davinci_spi->clk);
1164         spi_master_put(master);
1165         kfree(davinci_spi->tmp_buf);
1166         free_irq(davinci_spi->irq, davinci_spi);
1167         iounmap(davinci_spi->base);
1168         release_mem_region(davinci_spi->pbase, davinci_spi->region_size);
1169
1170         return 0;
1171 }
1172
1173 static struct platform_driver davinci_spi_driver = {
1174         .driver.name = "spi_davinci",
1175         .remove = __exit_p(davinci_spi_remove),
1176 };
1177
/*
 * Register the driver and bind it via platform_driver_probe(): the probe
 * routine is referenced directly here rather than in the driver struct,
 * so it runs once at init time (devices must already be registered).
 */
static int __init davinci_spi_init(void)
{
	return platform_driver_probe(&davinci_spi_driver, davinci_spi_probe);
}
module_init(davinci_spi_init);
1183
/* Unregister the platform driver; the core invokes davinci_spi_remove(). */
static void __exit davinci_spi_exit(void)
{
	platform_driver_unregister(&davinci_spi_driver);
}
module_exit(davinci_spi_exit);
1189
1190 MODULE_DESCRIPTION("TI DaVinci SPI Master Controller Driver");
1191 MODULE_LICENSE("GPL");