/*
 * Copyright (C) ST-Ericsson AB 2010
 * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
 * Author:  Daniel Martensson / daniel.martensson@stericsson.com
 *          Dmitry.Tarnyagin  / dmitry.tarnyagin@stericsson.com
 * License terms: GNU General Public License (GPL) version 2.
 */

#include <linux/version.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/if_arp.h>
#include <linux/timer.h>
#include <net/caif/caif_layer.h>
#include <net/caif/caif_hsi.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Martensson<daniel.martensson@stericsson.com>");
MODULE_DESCRIPTION("CAIF HSI driver");

/* Returns the number of padding bytes for alignment. */
#define PAD_POW2(x, pow) ((((x)&((pow)-1)) == 0) ? 0 :\
				(((pow)-((x)&((pow)-1)))))
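
/*
 * For example, PAD_POW2(5, 4) == 3 and PAD_POW2(8, 4) == 0: the number of
 * bytes needed to round x up to the next multiple of pow.
 */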

/*
 * HSI padding options.
 * Warning: must be a power of 2 (the & operation is used) and cannot
 * be zero!
 */
static int hsi_head_align = 4;
module_param(hsi_head_align, int, S_IRUGO);
MODULE_PARM_DESC(hsi_head_align, "HSI head alignment.");

static int hsi_tail_align = 4;
module_param(hsi_tail_align, int, S_IRUGO);
MODULE_PARM_DESC(hsi_tail_align, "HSI tail alignment.");

/*
 * HSI link layer flow-control thresholds.
 * Warning: A high threshold value might increase throughput but it will at
 * the same time prevent channel prioritization and increase the risk of
 * flooding the modem. The high threshold should be above the low one.
 */
static int hsi_high_threshold = 100;
module_param(hsi_high_threshold, int, S_IRUGO);
MODULE_PARM_DESC(hsi_high_threshold, "HSI high threshold (FLOW OFF).");

static int hsi_low_threshold = 50;
module_param(hsi_low_threshold, int, S_IRUGO);
MODULE_PARM_DESC(hsi_low_threshold, "HSI low threshold (FLOW ON).");

#define ON 1
#define OFF 0

/*
 * Threshold values for the HSI packet queue. Flow control will be asserted
 * when the number of packets exceeds HIGH_WATER_MARK. It will not be
 * de-asserted before the number of packets drops below LOW_WATER_MARK.
 */
#define LOW_WATER_MARK   hsi_low_threshold
#define HIGH_WATER_MARK  hsi_high_threshold

static LIST_HEAD(cfhsi_list);
static spinlock_t cfhsi_list_lock;

static void cfhsi_inactivity_tout(unsigned long arg)
{
	struct cfhsi *cfhsi = (struct cfhsi *)arg;

	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	/* Schedule power down work queue. */
	if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		queue_work(cfhsi->wq, &cfhsi->wake_down_work);
}

static void cfhsi_abort_tx(struct cfhsi *cfhsi)
{
	struct sk_buff *skb;
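
	/*
	 * Note: when the queue is empty, the loop below breaks out with
	 * cfhsi->lock held; the lock is released after the TX state has
	 * been reset and the inactivity timer rearmed.
	 */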
	for (;;) {
		spin_lock_bh(&cfhsi->lock);
		skb = skb_dequeue(&cfhsi->qhead);
		if (!skb)
			break;

		cfhsi->ndev->stats.tx_errors++;
		cfhsi->ndev->stats.tx_dropped++;
		spin_unlock_bh(&cfhsi->lock);
		kfree_skb(skb);
	}
	cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
	if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		mod_timer(&cfhsi->timer, jiffies + CFHSI_INACTIVITY_TOUT);
	spin_unlock_bh(&cfhsi->lock);
}

static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
{
	char buffer[32]; /* Any reasonable value */
	size_t fifo_occupancy;
	int ret;

	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	ret = cfhsi->dev->cfhsi_wake_up(cfhsi->dev);
	if (ret) {
		dev_warn(&cfhsi->ndev->dev,
			"%s: can't wake up HSI interface: %d.\n",
			__func__, ret);
		return ret;
	}

	do {
		ret = cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
				&fifo_occupancy);
		if (ret) {
			dev_warn(&cfhsi->ndev->dev,
				"%s: can't get FIFO occupancy: %d.\n",
				__func__, ret);
			break;
		} else if (!fifo_occupancy)
			/* No more data, exiting normally */
			break;

		fifo_occupancy = min(sizeof(buffer), fifo_occupancy);
		set_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
		ret = cfhsi->dev->cfhsi_rx(buffer, fifo_occupancy,
				cfhsi->dev);
		if (ret) {
			clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
			dev_warn(&cfhsi->ndev->dev,
				"%s: can't read data: %d.\n",
				__func__, ret);
			break;
		}

		ret = 5 * HZ;
		ret = wait_event_interruptible_timeout(cfhsi->flush_fifo_wait,
			 !test_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits), ret);

		if (ret < 0) {
			dev_warn(&cfhsi->ndev->dev,
				"%s: can't wait for flush complete: %d.\n",
				__func__, ret);
			break;
		} else if (!ret) {
			ret = -ETIMEDOUT;
			dev_warn(&cfhsi->ndev->dev,
				"%s: timeout waiting for flush complete.\n",
				__func__);
			break;
		}
	} while (1);

	cfhsi->dev->cfhsi_wake_down(cfhsi->dev);

	return ret;
}
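
/*
 * Build an outgoing HSI transfer in the TX buffer. The transfer starts with
 * a descriptor (header flags, offset to an optional embedded CAIF frame and
 * the cffrm_len[] table) followed by up to CFHSI_MAX_PKTS payload frames.
 * Each payload frame is laid out as hpad head-padding bytes (the first one
 * stores hpad - 1), the CAIF frame itself and tpad tail-padding bytes;
 * cffrm_len[i] holds the padded length. Returns the total number of bytes
 * to transfer, or zero if the TX queue was empty.
 */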
static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
	int nfrms = 0;
	int pld_len = 0;
	struct sk_buff *skb;
	u8 *pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;

	skb = skb_dequeue(&cfhsi->qhead);
	if (!skb)
		return 0;

	/* Clear offset; it is set below only if a frame is embedded. */
	desc->offset = 0;

	/* Check if we can embed a CAIF frame. */
	if (skb->len < CFHSI_MAX_EMB_FRM_SZ) {
		struct caif_payload_info *info;
		int hpad = 0;
		int tpad = 0;

		/* Calculate needed head alignment and tail alignment. */
		info = (struct caif_payload_info *)&skb->cb;

		hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
		tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);

		/* Check if frame still fits with added alignment. */
		if ((skb->len + hpad + tpad) <= CFHSI_MAX_EMB_FRM_SZ) {
			u8 *pemb = desc->emb_frm;
			desc->offset = CFHSI_DESC_SHORT_SZ;
			*pemb = (u8)(hpad - 1);
			pemb += hpad;

			/* Update network statistics. */
			cfhsi->ndev->stats.tx_packets++;
			cfhsi->ndev->stats.tx_bytes += skb->len;

			/* Copy in embedded CAIF frame. */
			skb_copy_bits(skb, 0, pemb, skb->len);
			consume_skb(skb);
			skb = NULL;
		}
	}

	/* Create payload CAIF frames. */
	pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
	while (nfrms < CFHSI_MAX_PKTS) {
		struct caif_payload_info *info;
		int hpad = 0;
		int tpad = 0;

		if (!skb)
			skb = skb_dequeue(&cfhsi->qhead);

		if (!skb)
			break;

		/* Calculate needed head alignment and tail alignment. */
		info = (struct caif_payload_info *)&skb->cb;

		hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
		tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);

		/* Fill in CAIF frame length in descriptor. */
		desc->cffrm_len[nfrms] = hpad + skb->len + tpad;

		/* Fill head padding information. */
		*pfrm = (u8)(hpad - 1);
		pfrm += hpad;

		/* Update network statistics. */
		cfhsi->ndev->stats.tx_packets++;
		cfhsi->ndev->stats.tx_bytes += skb->len;

		/* Copy in CAIF frame. */
		skb_copy_bits(skb, 0, pfrm, skb->len);

		/* Update payload length. */
		pld_len += desc->cffrm_len[nfrms];

		/* Update frame pointer. */
		pfrm += skb->len + tpad;
		consume_skb(skb);
		skb = NULL;

		/* Update number of frames. */
		nfrms++;
	}

	/* Unused length fields should be zero-filled (according to SPEC). */
	while (nfrms < CFHSI_MAX_PKTS) {
		desc->cffrm_len[nfrms] = 0x0000;
		nfrms++;
	}

	/* Check if we can piggy-back another descriptor. */
	skb = skb_peek(&cfhsi->qhead);
	if (skb)
		desc->header |= CFHSI_PIGGY_DESC;
	else
		desc->header &= ~CFHSI_PIGGY_DESC;

	return CFHSI_DESC_SZ + pld_len;
}

static void cfhsi_tx_done_work(struct work_struct *work)
{
	struct cfhsi *cfhsi = NULL;
	struct cfhsi_desc *desc = NULL;
	int len = 0;
	int res;

	cfhsi = container_of(work, struct cfhsi, tx_done_work);
	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	desc = (struct cfhsi_desc *)cfhsi->tx_buf;

	do {
		/*
		 * Send flow on if flow off has been previously signalled
		 * and number of packets is below low water mark.
		 */
		spin_lock_bh(&cfhsi->lock);
		if (cfhsi->flow_off_sent &&
				cfhsi->qhead.qlen <= cfhsi->q_low_mark &&
				cfhsi->cfdev.flowctrl) {
			cfhsi->flow_off_sent = 0;
			cfhsi->cfdev.flowctrl(cfhsi->ndev, ON);
		}
		spin_unlock_bh(&cfhsi->lock);

		/* Create HSI frame. */
		len = cfhsi_tx_frm(desc, cfhsi);
		if (!len) {
			cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
			/* Start inactivity timer. */
			mod_timer(&cfhsi->timer,
					jiffies + CFHSI_INACTIVITY_TOUT);
			break;
		}

		/* Set up new transfer. */
		res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
		if (WARN_ON(res < 0)) {
			dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
				__func__, res);
		}
	} while (res < 0);
}

static void cfhsi_tx_done_cb(struct cfhsi_drv *drv)
{
	struct cfhsi *cfhsi;

	cfhsi = container_of(drv, struct cfhsi, drv);
	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	queue_work(cfhsi->wq, &cfhsi->tx_done_work);
}
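
/*
 * Parse a received descriptor: deliver the embedded CAIF frame (if any) to
 * the network stack, then sum up cffrm_len[] to compute how many payload
 * bytes follow. Returns the size of the next transfer, or zero if the
 * descriptor is invalid or carries no payload.
 */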
static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
	int xfer_sz = 0;
	int nfrms = 0;
	u16 *plen = NULL;
	u8 *pfrm = NULL;

	if ((desc->header & ~CFHSI_PIGGY_DESC) ||
			(desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
		dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n",
			__func__);
		return 0;
	}

	/* Check for embedded CAIF frame. */
	if (desc->offset) {
		struct sk_buff *skb;
		u8 *dst = NULL;
		int len = 0, retries = 0;
		pfrm = ((u8 *)desc) + desc->offset;

		/* Remove offset padding. */
		pfrm += *pfrm + 1;

		/* Read length of CAIF frame (little endian). */
		len = *pfrm;
		len |= ((*(pfrm+1)) << 8) & 0xFF00;
		len += 2;	/* Add FCS fields. */

		/* Allocate SKB (may sleep, so this must not run in
		 * atomic context). */
		skb = alloc_skb(len + 1, GFP_KERNEL);
		while (!skb) {
			retries++;
			schedule_timeout_interruptible(1);
			skb = alloc_skb(len + 1, GFP_KERNEL);
			if (skb) {
				printk(KERN_WARNING "%s: slept for %d "
						"jiffies before getting "
						"memory\n",
						__func__, retries);
				break;
			}
			if (retries > HZ) {
				printk(KERN_ERR "%s: slept for more than "
						"a second and did not "
						"get memory\n",
						__func__);
				cfhsi->ndev->stats.rx_dropped++;
				goto drop_frame;
			}
		}
		caif_assert(skb != NULL);

		dst = skb_put(skb, len);
		memcpy(dst, pfrm, len);

		skb->protocol = htons(ETH_P_CAIF);
		skb_reset_mac_header(skb);
		skb->dev = cfhsi->ndev;

		/*
		 * We are called from an arch-specific platform device.
		 * Unfortunately we don't know what context we're
		 * running in.
		 */
		if (in_interrupt())
			netif_rx(skb);
		else
			netif_rx_ni(skb);

		/* Update network statistics. */
		cfhsi->ndev->stats.rx_packets++;
		cfhsi->ndev->stats.rx_bytes += len;
	}

drop_frame:
	/* Calculate transfer length. */
	plen = desc->cffrm_len;
	while (nfrms < CFHSI_MAX_PKTS && *plen) {
		xfer_sz += *plen;
		plen++;
		nfrms++;
	}

	/* Check for piggy-backed descriptor. */
	if (desc->header & CFHSI_PIGGY_DESC)
		xfer_sz += CFHSI_DESC_SZ;

	if (xfer_sz % 4) {
		dev_err(&cfhsi->ndev->dev,
				"%s: Invalid payload len: %d, ignored.\n",
			__func__, xfer_sz);
		xfer_sz = 0;
	}

	return xfer_sz;
}
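
/*
 * Parse the payload part of a transfer: walk the cffrm_len[] table, strip
 * the head padding from each CAIF frame and pass the frames up the stack.
 * Returns the number of payload bytes consumed, or -EINVAL on a malformed
 * descriptor.
 */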
static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
	int rx_sz = 0;
	int nfrms = 0;
	u16 *plen = NULL;
	u8 *pfrm = NULL;

	/* Sanity check header and offset. */
	if (WARN_ON((desc->header & ~CFHSI_PIGGY_DESC) ||
			(desc->offset > CFHSI_MAX_EMB_FRM_SZ))) {
		dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n",
			__func__);
		return -EINVAL;
	}

	/* Set frame pointer to start of payload. */
	pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
	plen = desc->cffrm_len;
	while (nfrms < CFHSI_MAX_PKTS && *plen) {
		struct sk_buff *skb;
		u8 *dst = NULL;
		u8 *pcffrm = NULL;
		int len = 0, retries = 0;

		if (WARN_ON(desc->cffrm_len[nfrms] > CFHSI_MAX_PAYLOAD_SZ)) {
			dev_err(&cfhsi->ndev->dev, "%s: Invalid payload.\n",
				__func__);
			return -EINVAL;
		}

		/* CAIF frame starts after head padding. */
		pcffrm = pfrm + *pfrm + 1;

		/* Read length of CAIF frame (little endian). */
		len = *pcffrm;
		len |= ((*(pcffrm + 1)) << 8) & 0xFF00;
		len += 2;	/* Add FCS fields. */

		/* Allocate SKB (may sleep, so this must not run in
		 * atomic context). */
		skb = alloc_skb(len + 1, GFP_KERNEL);
		while (!skb) {
			retries++;
			schedule_timeout_interruptible(1);
			skb = alloc_skb(len + 1, GFP_KERNEL);
			if (skb) {
				printk(KERN_WARNING "%s: slept for %d "
						"jiffies before getting "
						"memory\n",
						__func__, retries);
				break;
			}
			if (retries > HZ) {
				printk(KERN_ERR "%s: slept for more than "
						"a second and did not "
						"get memory\n",
						__func__);
				cfhsi->ndev->stats.rx_dropped++;
				goto drop_frame;
			}
		}
		caif_assert(skb != NULL);

		dst = skb_put(skb, len);
		memcpy(dst, pcffrm, len);

		skb->protocol = htons(ETH_P_CAIF);
		skb_reset_mac_header(skb);
		skb->dev = cfhsi->ndev;

		/*
		 * We're called from a platform device,
		 * and don't know the context we're running in.
		 */
		if (in_interrupt())
			netif_rx(skb);
		else
			netif_rx_ni(skb);

		/* Update network statistics. */
		cfhsi->ndev->stats.rx_packets++;
		cfhsi->ndev->stats.rx_bytes += len;

drop_frame:
		pfrm += *plen;
		rx_sz += *plen;
		plen++;
		nfrms++;
	}

	return rx_sz;
}

static void cfhsi_rx_done_work(struct work_struct *work)
{
	int res;
	int desc_pld_len = 0;
	struct cfhsi *cfhsi = NULL;
	struct cfhsi_desc *desc = NULL;

	cfhsi = container_of(work, struct cfhsi, rx_done_work);
	desc = (struct cfhsi_desc *)cfhsi->rx_buf;

	dev_dbg(&cfhsi->ndev->dev, "%s: Kick timer if pending.\n",
		__func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/* Update inactivity timer if pending. */
	mod_timer_pending(&cfhsi->timer, jiffies + CFHSI_INACTIVITY_TOUT);

	if (cfhsi->rx_state == CFHSI_RX_STATE_DESC) {
		desc_pld_len = cfhsi_rx_desc(desc, cfhsi);
	} else {
		int pld_len;

		pld_len = cfhsi_rx_pld(desc, cfhsi);

		if ((pld_len > 0) && (desc->header & CFHSI_PIGGY_DESC)) {
			struct cfhsi_desc *piggy_desc;
			piggy_desc = (struct cfhsi_desc *)
				(desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ +
						pld_len);

			/* Extract piggy-backed descriptor. */
			desc_pld_len = cfhsi_rx_desc(piggy_desc, cfhsi);

			/*
			 * Copy needed information from the piggy-backed
			 * descriptor to the descriptor at the start of
			 * the RX buffer.
			 */
			memcpy((u8 *)desc, (u8 *)piggy_desc,
					CFHSI_DESC_SHORT_SZ);
		}
	}

	if (desc_pld_len) {
		cfhsi->rx_state = CFHSI_RX_STATE_PAYLOAD;
		cfhsi->rx_ptr = cfhsi->rx_buf + CFHSI_DESC_SZ;
		cfhsi->rx_len = desc_pld_len;
	} else {
		cfhsi->rx_state = CFHSI_RX_STATE_DESC;
		cfhsi->rx_ptr = cfhsi->rx_buf;
		cfhsi->rx_len = CFHSI_DESC_SZ;
	}
	clear_bit(CFHSI_PENDING_RX, &cfhsi->bits);

	if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) {
		/* Set up new transfer. */
		dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n",
			__func__);
		res = cfhsi->dev->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len,
				cfhsi->dev);
		if (WARN_ON(res < 0)) {
			dev_err(&cfhsi->ndev->dev, "%s: RX error %d.\n",
				__func__, res);
			cfhsi->ndev->stats.rx_errors++;
			cfhsi->ndev->stats.rx_dropped++;
		}
	}
}

static void cfhsi_rx_done_cb(struct cfhsi_drv *drv)
{
	struct cfhsi *cfhsi;

	cfhsi = container_of(drv, struct cfhsi, drv);
	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	set_bit(CFHSI_PENDING_RX, &cfhsi->bits);

	if (test_and_clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits))
		wake_up_interruptible(&cfhsi->flush_fifo_wait);
	else
		queue_work(cfhsi->wq, &cfhsi->rx_done_work);
}
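
/*
 * Bring the link out of low-power state: assert the wake line, wait for the
 * peer to acknowledge (CFHSI_WAKE_UP_ACK is set by cfhsi_wake_up_cb), then
 * resume RX (unless an RX completion is still pending) and, if packets are
 * queued, start a new TX transfer.
 */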
static void cfhsi_wake_up(struct work_struct *work)
{
	struct cfhsi *cfhsi = NULL;
	int res;
	int len;
	long ret;

	cfhsi = container_of(work, struct cfhsi, wake_up_work);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	if (unlikely(test_bit(CFHSI_AWAKE, &cfhsi->bits))) {
		/* This happens when wakeup is requested by
		 * both ends at the same time. */
		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
		return;
	}

	/* Activate wake line. */
	cfhsi->dev->cfhsi_wake_up(cfhsi->dev);

	dev_dbg(&cfhsi->ndev->dev, "%s: Start waiting.\n",
		__func__);

	/* Wait for acknowledge. */
	ret = CFHSI_WAKEUP_TOUT;
	ret = wait_event_interruptible_timeout(cfhsi->wake_up_wait,
					test_bit(CFHSI_WAKE_UP_ACK,
							&cfhsi->bits), ret);
	if (unlikely(ret < 0)) {
		/* Interrupted by signal. */
		dev_info(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n",
			__func__, ret);
		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
		cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
		return;
	} else if (!ret) {
		/* Wakeup timeout */
		dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n",
			__func__);
		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
		cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
		return;
	}
	dev_dbg(&cfhsi->ndev->dev, "%s: Woken.\n",
		__func__);

	/* Set power up bit. */
	set_bit(CFHSI_AWAKE, &cfhsi->bits);
	clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);

	/* Resume read operation. */
	if (!test_bit(CFHSI_PENDING_RX, &cfhsi->bits)) {
		dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n",
			__func__);
		res = cfhsi->dev->cfhsi_rx(cfhsi->rx_ptr,
				cfhsi->rx_len, cfhsi->dev);
		if (WARN_ON(res < 0)) {
			dev_err(&cfhsi->ndev->dev, "%s: RX error %d.\n",
				__func__, res);
		}
	}

	/* Clear power up acknowledgment. */
	clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);

	spin_lock_bh(&cfhsi->lock);

	/* Resume transmit if queue is not empty. */
	if (!skb_peek(&cfhsi->qhead)) {
		dev_dbg(&cfhsi->ndev->dev, "%s: Peer wake, start timer.\n",
			__func__);
		/* Start inactivity timer. */
		mod_timer(&cfhsi->timer,
				jiffies + CFHSI_INACTIVITY_TOUT);
		spin_unlock_bh(&cfhsi->lock);
		return;
	}

	dev_dbg(&cfhsi->ndev->dev, "%s: Host wake.\n",
		__func__);

	spin_unlock_bh(&cfhsi->lock);

	/* Create HSI frame. */
	len = cfhsi_tx_frm((struct cfhsi_desc *)cfhsi->tx_buf, cfhsi);

	if (likely(len > 0)) {
		/* Set up new transfer. */
		res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
		if (WARN_ON(res < 0)) {
			dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
				__func__, res);
			cfhsi_abort_tx(cfhsi);
		}
	} else {
		dev_err(&cfhsi->ndev->dev,
				"%s: Failed to create HSI frame: %d.\n",
				__func__, len);
	}
}
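
/*
 * Enter low-power state after the inactivity timeout: unless the RX FIFO
 * still holds data, cancel pending RX requests, deactivate the wake line
 * and wait for the peer to acknowledge (CFHSI_WAKE_DOWN_ACK is set by
 * cfhsi_wake_down_cb).
 */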
static void cfhsi_wake_down(struct work_struct *work)
{
	long ret;
	struct cfhsi *cfhsi = NULL;
	size_t fifo_occupancy;

	cfhsi = container_of(work, struct cfhsi, wake_down_work);
	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/* Check if there is something in FIFO. */
	if (WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
							&fifo_occupancy)))
		fifo_occupancy = 0;

	if (fifo_occupancy) {
		dev_dbg(&cfhsi->ndev->dev,
				"%s: %u words in RX FIFO, restart timer.\n",
				__func__, (unsigned) fifo_occupancy);
		spin_lock_bh(&cfhsi->lock);
		mod_timer(&cfhsi->timer,
				jiffies + CFHSI_INACTIVITY_TOUT);
		spin_unlock_bh(&cfhsi->lock);
		return;
	}

	/* Cancel pending RX requests */
	cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);

	/* Deactivate wake line. */
	cfhsi->dev->cfhsi_wake_down(cfhsi->dev);

	/* Wait for acknowledge. */
	ret = CFHSI_WAKEUP_TOUT;
	ret = wait_event_interruptible_timeout(cfhsi->wake_down_wait,
					test_bit(CFHSI_WAKE_DOWN_ACK,
							&cfhsi->bits),
					ret);
	if (ret < 0) {
		/* Interrupted by signal. */
		dev_info(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n",
			__func__, ret);
		return;
	} else if (!ret) {
		/* Timeout */
		dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n",
			__func__);
	}

	/* Clear power down acknowledgment. */
	clear_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
	clear_bit(CFHSI_AWAKE, &cfhsi->bits);

	/* Check if there is something in FIFO. */
	if (WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
							&fifo_occupancy)))
		fifo_occupancy = 0;

	if (fifo_occupancy) {
		dev_dbg(&cfhsi->ndev->dev,
				"%s: %u words in RX FIFO, wakeup forced.\n",
				__func__, (unsigned) fifo_occupancy);
		if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
			queue_work(cfhsi->wq, &cfhsi->wake_up_work);
	} else
		dev_dbg(&cfhsi->ndev->dev, "%s: Done.\n",
			__func__);
}

static void cfhsi_wake_up_cb(struct cfhsi_drv *drv)
{
	struct cfhsi *cfhsi = NULL;

	cfhsi = container_of(drv, struct cfhsi, drv);
	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	set_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
	wake_up_interruptible(&cfhsi->wake_up_wait);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/* Schedule wake up work queue if the peer initiates. */
	if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
		queue_work(cfhsi->wq, &cfhsi->wake_up_work);
}

static void cfhsi_wake_down_cb(struct cfhsi_drv *drv)
{
	struct cfhsi *cfhsi = NULL;

	cfhsi = container_of(drv, struct cfhsi, drv);
	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	/* Initiating low power is only permitted by the host (us). */
	set_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
	wake_up_interruptible(&cfhsi->wake_down_wait);
}
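
/*
 * Transmit entry point: queue the packet, send flow off towards the CAIF
 * stack when the queue grows past the high water mark, and either start a
 * transfer directly (the link is awake while the inactivity timer is
 * pending) or schedule the wake-up work to power the link up first.
 */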
static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct cfhsi *cfhsi = NULL;
	int start_xfer = 0;
	int timer_active;

	if (!dev)
		return -EINVAL;

	cfhsi = netdev_priv(dev);

	spin_lock_bh(&cfhsi->lock);

	skb_queue_tail(&cfhsi->qhead, skb);

	/* Sanity check; xmit should not be called after unregister_netdev */
	if (WARN_ON(test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))) {
		spin_unlock_bh(&cfhsi->lock);
		cfhsi_abort_tx(cfhsi);
		return -EINVAL;
	}

	/* Send flow off if number of packets is above high water mark. */
	if (!cfhsi->flow_off_sent &&
		cfhsi->qhead.qlen > cfhsi->q_high_mark &&
		cfhsi->cfdev.flowctrl) {
		cfhsi->flow_off_sent = 1;
		cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF);
	}

	if (cfhsi->tx_state == CFHSI_TX_STATE_IDLE) {
		cfhsi->tx_state = CFHSI_TX_STATE_XFER;
		start_xfer = 1;
	}

	spin_unlock_bh(&cfhsi->lock);

	if (!start_xfer)
		return 0;

	/* Delete inactivity timer if started. */
#ifdef CONFIG_SMP
	timer_active = del_timer_sync(&cfhsi->timer);
#else
	timer_active = del_timer(&cfhsi->timer);
#endif /* CONFIG_SMP */

	if (timer_active) {
		struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
		int len;
		int res;

		/* Create HSI frame. */
		len = cfhsi_tx_frm(desc, cfhsi);
		BUG_ON(!len);

		/* Set up new transfer. */
		res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
		if (WARN_ON(res < 0)) {
			dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
				__func__, res);
			cfhsi_abort_tx(cfhsi);
		}
	} else {
		/* Schedule wake up work queue if we initiate. */
		if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
			queue_work(cfhsi->wq, &cfhsi->wake_up_work);
	}

	return 0;
}

static int cfhsi_open(struct net_device *dev)
{
	netif_wake_queue(dev);

	return 0;
}

static int cfhsi_close(struct net_device *dev)
{
	netif_stop_queue(dev);

	return 0;
}

static const struct net_device_ops cfhsi_ops = {
	.ndo_open = cfhsi_open,
	.ndo_stop = cfhsi_close,
	.ndo_start_xmit = cfhsi_xmit
};

static void cfhsi_setup(struct net_device *dev)
{
	struct cfhsi *cfhsi = netdev_priv(dev);
	dev->features = 0;
	dev->netdev_ops = &cfhsi_ops;
	dev->type = ARPHRD_CAIF;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
	dev->mtu = CFHSI_MAX_PAYLOAD_SZ;
	dev->tx_queue_len = 0;
	dev->destructor = free_netdev;
	skb_queue_head_init(&cfhsi->qhead);
	cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
	cfhsi->cfdev.use_frag = false;
	cfhsi->cfdev.use_stx = false;
	cfhsi->cfdev.use_fcs = false;
	cfhsi->ndev = dev;
}
int cfhsi_probe(struct platform_device *pdev)
{
	struct cfhsi *cfhsi = NULL;
	struct net_device *ndev;
	struct cfhsi_dev *dev;
	int res;

	ndev = alloc_netdev(sizeof(struct cfhsi), "cfhsi%d", cfhsi_setup);
	if (!ndev) {
		dev_err(&pdev->dev, "%s: alloc_netdev failed.\n",
			__func__);
		return -ENODEV;
	}

	cfhsi = netdev_priv(ndev);
	cfhsi->ndev = ndev;
	cfhsi->pdev = pdev;

	/* Initialize state variables. */
	cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
	cfhsi->rx_state = CFHSI_RX_STATE_DESC;

	/* Set flow info */
	cfhsi->flow_off_sent = 0;
	cfhsi->q_low_mark = LOW_WATER_MARK;
	cfhsi->q_high_mark = HIGH_WATER_MARK;

	/* Assign the HSI device. */
	dev = (struct cfhsi_dev *)pdev->dev.platform_data;
	cfhsi->dev = dev;

	/* Assign the driver to this HSI device. */
	dev->drv = &cfhsi->drv;

	/*
	 * Allocate a TX buffer with the size of an HSI packet descriptor
	 * and the necessary room for CAIF payload frames.
	 */
	cfhsi->tx_buf = kzalloc(CFHSI_BUF_SZ_TX, GFP_KERNEL);
	if (!cfhsi->tx_buf) {
		dev_err(&ndev->dev, "%s: Failed to allocate TX buffer.\n",
			__func__);
		res = -ENOMEM;
		goto err_alloc_tx;
	}

	/*
	 * Allocate an RX buffer with the size of two HSI packet descriptors
	 * and the necessary room for CAIF payload frames.
	 */
	cfhsi->rx_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
	if (!cfhsi->rx_buf) {
		dev_err(&ndev->dev, "%s: Failed to allocate RX buffer.\n",
			__func__);
		res = -ENOMEM;
		goto err_alloc_rx;
	}

	/* Initialize receive variables. */
	cfhsi->rx_ptr = cfhsi->rx_buf;
	cfhsi->rx_len = CFHSI_DESC_SZ;

	/* Initialize spin lock. */
	spin_lock_init(&cfhsi->lock);

	/* Set up the driver. */
	cfhsi->drv.tx_done_cb = cfhsi_tx_done_cb;
	cfhsi->drv.rx_done_cb = cfhsi_rx_done_cb;

	/* Initialize the work queues. */
	INIT_WORK(&cfhsi->wake_up_work, cfhsi_wake_up);
	INIT_WORK(&cfhsi->wake_down_work, cfhsi_wake_down);
	INIT_WORK(&cfhsi->rx_done_work, cfhsi_rx_done_work);
	INIT_WORK(&cfhsi->tx_done_work, cfhsi_tx_done_work);

	/* Clear all bit fields. */
	clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
	clear_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
	clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
	clear_bit(CFHSI_AWAKE, &cfhsi->bits);
	clear_bit(CFHSI_PENDING_RX, &cfhsi->bits);

	/* Create work thread. */
	cfhsi->wq = create_singlethread_workqueue(pdev->name);
	if (!cfhsi->wq) {
		dev_err(&ndev->dev, "%s: Failed to create work queue.\n",
			__func__);
		res = -ENODEV;
		goto err_create_wq;
	}

	/* Initialize wait queues. */
	init_waitqueue_head(&cfhsi->wake_up_wait);
	init_waitqueue_head(&cfhsi->wake_down_wait);
	init_waitqueue_head(&cfhsi->flush_fifo_wait);

	/* Set up the inactivity timer. */
	init_timer(&cfhsi->timer);
	cfhsi->timer.data = (unsigned long)cfhsi;
	cfhsi->timer.function = cfhsi_inactivity_tout;

	/* Add CAIF HSI device to list. */
	spin_lock(&cfhsi_list_lock);
	list_add_tail(&cfhsi->list, &cfhsi_list);
	spin_unlock(&cfhsi_list_lock);

	/* Activate HSI interface. */
	res = cfhsi->dev->cfhsi_up(cfhsi->dev);
	if (res) {
		dev_err(&cfhsi->ndev->dev,
			"%s: can't activate HSI interface: %d.\n",
			__func__, res);
		goto err_activate;
	}

	/* Flush FIFO */
	res = cfhsi_flush_fifo(cfhsi);
	if (res) {
		dev_err(&ndev->dev, "%s: Can't flush FIFO: %d.\n",
			__func__, res);
		goto err_net_reg;
	}

	cfhsi->drv.wake_up_cb = cfhsi_wake_up_cb;
	cfhsi->drv.wake_down_cb = cfhsi_wake_down_cb;

	/* Register network device. */
	res = register_netdev(ndev);
	if (res) {
		dev_err(&ndev->dev, "%s: Registration error: %d.\n",
			__func__, res);
		goto err_net_reg;
	}

	netif_stop_queue(ndev);

	return res;

 err_net_reg:
	cfhsi->dev->cfhsi_down(cfhsi->dev);
 err_activate:
	/* Remove the device from the list before tearing it down. */
	spin_lock(&cfhsi_list_lock);
	list_del(&cfhsi->list);
	spin_unlock(&cfhsi_list_lock);
	destroy_workqueue(cfhsi->wq);
 err_create_wq:
	kfree(cfhsi->rx_buf);
 err_alloc_rx:
	kfree(cfhsi->tx_buf);
 err_alloc_tx:
	free_netdev(ndev);

	return res;
}
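
/*
 * Common teardown for cfhsi_remove() and module exit: stop TX, flush the
 * work queue, kill the inactivity timer, cancel pending RX, bring the
 * interface down and unregister the network device before freeing the
 * TX/RX buffers.
 */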
static void cfhsi_shutdown(struct cfhsi *cfhsi, bool remove_platform_dev)
{
	u8 *tx_buf, *rx_buf;

	/* Stop TXing */
	netif_tx_stop_all_queues(cfhsi->ndev);

	/* going to shutdown driver */
	set_bit(CFHSI_SHUTDOWN, &cfhsi->bits);

	if (remove_platform_dev) {
		/* Flush workqueue */
		flush_workqueue(cfhsi->wq);

		/* Notify device. */
		platform_device_unregister(cfhsi->pdev);
	}

	/* Flush workqueue */
	flush_workqueue(cfhsi->wq);

	/* Delete timer if pending */
#ifdef CONFIG_SMP
	del_timer_sync(&cfhsi->timer);
#else
	del_timer(&cfhsi->timer);
#endif /* CONFIG_SMP */

	/* Cancel pending RX request (if any) */
	cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);

	/* Flush again and destroy workqueue */
	destroy_workqueue(cfhsi->wq);

	/* Store buffers: will be freed later. */
	tx_buf = cfhsi->tx_buf;
	rx_buf = cfhsi->rx_buf;

	/* Flush transmit queues. */
	cfhsi_abort_tx(cfhsi);

	/* Deactivate interface */
	cfhsi->dev->cfhsi_down(cfhsi->dev);

	/* Finally unregister the network device. */
	unregister_netdev(cfhsi->ndev);

	/* Free buffers. */
	kfree(tx_buf);
	kfree(rx_buf);
}

int cfhsi_remove(struct platform_device *pdev)
{
	struct list_head *list_node;
	struct list_head *n;
	struct cfhsi *cfhsi = NULL;
	struct cfhsi_dev *dev;

	dev = (struct cfhsi_dev *)pdev->dev.platform_data;
	spin_lock(&cfhsi_list_lock);
	list_for_each_safe(list_node, n, &cfhsi_list) {
		cfhsi = list_entry(list_node, struct cfhsi, list);
		/* Find the corresponding device. */
		if (cfhsi->dev == dev) {
			/* Remove from list. */
			list_del(list_node);
			spin_unlock(&cfhsi_list_lock);

			/* Shutdown driver. */
			cfhsi_shutdown(cfhsi, false);

			return 0;
		}
	}
	spin_unlock(&cfhsi_list_lock);
	return -ENODEV;
}

struct platform_driver cfhsi_plat_drv = {
	.probe = cfhsi_probe,
	.remove = cfhsi_remove,
	.driver = {
		   .name = "cfhsi",
		   .owner = THIS_MODULE,
		   },
};

static void __exit cfhsi_exit_module(void)
{
	struct list_head *list_node;
	struct list_head *n;
	struct cfhsi *cfhsi = NULL;

	spin_lock(&cfhsi_list_lock);
	list_for_each_safe(list_node, n, &cfhsi_list) {
		cfhsi = list_entry(list_node, struct cfhsi, list);

		/* Remove from list. */
		list_del(list_node);
		spin_unlock(&cfhsi_list_lock);

		/* Shutdown driver. */
		cfhsi_shutdown(cfhsi, true);

		spin_lock(&cfhsi_list_lock);
	}
	spin_unlock(&cfhsi_list_lock);

	/* Unregister platform driver. */
	platform_driver_unregister(&cfhsi_plat_drv);
}

static int __init cfhsi_init_module(void)
{
	int result;

	/* Initialize spin lock. */
	spin_lock_init(&cfhsi_list_lock);

	/* Register platform driver. */
	result = platform_driver_register(&cfhsi_plat_drv);
	if (result)
		printk(KERN_ERR "Could not register platform HSI driver: %d.\n",
			result);

	return result;
}

module_init(cfhsi_init_module);
module_exit(cfhsi_exit_module);