pasemi_mac: Convert to new dma library
drivers/net/pasemi_mac.c
/*
 * Copyright (C) 2006-2007 PA Semi, Inc
 *
 * Driver for the PA Semi PWRficient onchip 1G/10G Ethernet MACs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <asm/dma-mapping.h>
#include <linux/in.h>
#include <linux/skbuff.h>

#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/checksum.h>

#include <asm/irq.h>
#include <asm/firmware.h>
#include <asm/pasemi_dma.h>

#include "pasemi_mac.h"

/* We have our own alignment, since ppc64 in general has it set to 0
 * because of design flaws in some of the server bridge chips. However,
 * for PWRficient doing the unaligned copies is more expensive than
 * doing unaligned DMA, so make sure the data is aligned instead.
 */
#define LOCAL_SKB_ALIGN 2

/* TODO list
 *
 * - Multicast support
 * - Large MTU support
 * - SW LRO
 * - Multiqueue RX/TX
 */


/* Must be a power of two */
#define RX_RING_SIZE 4096
#define TX_RING_SIZE 4096

#define DEFAULT_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TX_DESC(tx, num)        ((tx)->chan.ring_virt[(num) & (TX_RING_SIZE-1)])
#define TX_DESC_INFO(tx, num)   ((tx)->ring_info[(num) & (TX_RING_SIZE-1)])
#define RX_DESC(rx, num)        ((rx)->chan.ring_virt[(num) & (RX_RING_SIZE-1)])
#define RX_DESC_INFO(rx, num)   ((rx)->ring_info[(num) & (RX_RING_SIZE-1)])
#define RX_BUFF(rx, num)        ((rx)->buffers[(num) & (RX_RING_SIZE-1)])

#define RING_USED(ring)         (((ring)->next_to_fill - (ring)->next_to_clean) \
                                 & ((ring)->size - 1))
#define RING_AVAIL(ring)        ((ring->size) - RING_USED(ring))
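
/* Ring indices are free-running and only masked with (size - 1) on
 * access, which is why both ring sizes must be powers of two: the
 * RING_USED() subtraction then stays correct across index wraparound.
 */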

#define BUF_SIZE 1646 /* 1500 MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Olof Johansson <olof@lixom.net>");
MODULE_DESCRIPTION("PA Semi PWRficient Ethernet driver");

static int debug = -1;  /* -1 == use DEFAULT_MSG_ENABLE as value */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "PA Semi MAC bitmapped debugging message enable value");

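/* When DMA goes through the IOMMU (running under a hypervisor/LPAR, or
 * when force-translation is configured in), the channel and interface
 * configs set up below get additional translation bits set.
 */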
static int translation_enabled(void)
{
#if defined(CONFIG_PPC_PASEMI_IOMMU_DMA_FORCE)
        return 1;
#else
        return firmware_has_feature(FW_FEATURE_LPAR);
#endif
}

static void write_iob_reg(unsigned int reg, unsigned int val)
{
        pasemi_write_iob_reg(reg, val);
}

static unsigned int read_mac_reg(struct pasemi_mac *mac, unsigned int reg)
{
        return pasemi_read_mac_reg(mac->dma_if, reg);
}

static void write_mac_reg(struct pasemi_mac *mac, unsigned int reg,
                          unsigned int val)
{
        pasemi_write_mac_reg(mac->dma_if, reg, val);
}

static unsigned int read_dma_reg(unsigned int reg)
{
        return pasemi_read_dma_reg(reg);
}

static void write_dma_reg(unsigned int reg, unsigned int val)
{
        pasemi_write_dma_reg(reg, val);
}

static struct pasemi_mac_rxring *rx_ring(struct pasemi_mac *mac)
{
        return mac->rx;
}

static struct pasemi_mac_txring *tx_ring(struct pasemi_mac *mac)
{
        return mac->tx;
}

static int mac_to_intf(struct pasemi_mac *mac)
{
        struct pci_dev *pdev = mac->pdev;
        u32 tmp;
        int nintf, off, i, j;
        int devfn = pdev->devfn;

        tmp = read_dma_reg(PAS_DMA_CAP_IFI);
        nintf = (tmp & PAS_DMA_CAP_IFI_NIN_M) >> PAS_DMA_CAP_IFI_NIN_S;
        off = (tmp & PAS_DMA_CAP_IFI_IOFF_M) >> PAS_DMA_CAP_IFI_IOFF_S;

        /* IOFF contains the offset to the registers containing the
         * DMA interface-to-MAC-pci-id mappings, and NIN contains the
         * total number of interfaces. Each register contains 4 devfns.
         * Just do a linear search until we find the devfn of the MAC
         * we're trying to look up.
         */

        for (i = 0; i < (nintf+3)/4; i++) {
                tmp = read_dma_reg(off+4*i);
                for (j = 0; j < 4; j++) {
                        if (((tmp >> (8*j)) & 0xff) == devfn)
                                return i*4 + j;
                }
        }
        return -1;
}

static int pasemi_get_mac_addr(struct pasemi_mac *mac)
{
        struct pci_dev *pdev = mac->pdev;
        struct device_node *dn = pci_device_to_OF_node(pdev);
        int len;
        const u8 *maddr;
        u8 addr[6];

        if (!dn) {
                dev_dbg(&pdev->dev,
                          "No device node for mac, not configuring\n");
                return -ENOENT;
        }

        maddr = of_get_property(dn, "local-mac-address", &len);

        if (maddr && len == 6) {
                memcpy(mac->mac_addr, maddr, 6);
                return 0;
        }

        /* Some old versions of firmware mistakenly use mac-address
         * (and as a string) instead of a byte array in local-mac-address.
         */

        if (maddr == NULL)
                maddr = of_get_property(dn, "mac-address", NULL);

        if (maddr == NULL) {
                dev_warn(&pdev->dev,
                         "no mac address in device tree, not configuring\n");
                return -ENOENT;
        }

        if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &addr[0],
                   &addr[1], &addr[2], &addr[3], &addr[4], &addr[5]) != 6) {
                dev_warn(&pdev->dev,
                         "can't parse mac address, not configuring\n");
                return -EINVAL;
        }

        memcpy(mac->mac_addr, addr, 6);

        return 0;
}

static int pasemi_mac_unmap_tx_skb(struct pasemi_mac *mac,
                                    struct sk_buff *skb,
                                    dma_addr_t *dmas)
{
        int f;
        int nfrags = skb_shinfo(skb)->nr_frags;

        pci_unmap_single(mac->dma_pdev, dmas[0], skb_headlen(skb),
                         PCI_DMA_TODEVICE);

        for (f = 0; f < nfrags; f++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

                pci_unmap_page(mac->dma_pdev, dmas[f+1], frag->size,
                               PCI_DMA_TODEVICE);
        }
        dev_kfree_skb_irq(skb);

        /* Freed descriptor slot + main SKB ptr + nfrags additional ptrs,
         * rounded up to an even count to match the descriptor padding
         */
        return (nfrags + 3) & ~1;
}

static int pasemi_mac_setup_rx_resources(struct net_device *dev)
{
        struct pasemi_mac_rxring *ring;
        struct pasemi_mac *mac = netdev_priv(dev);
        int chno;
        unsigned int cfg;

        ring = pasemi_dma_alloc_chan(RXCHAN, sizeof(struct pasemi_mac_rxring),
                                     offsetof(struct pasemi_mac_rxring, chan));

        if (!ring) {
                dev_err(&mac->pdev->dev, "Can't allocate RX channel\n");
                goto out_chan;
        }
        chno = ring->chan.chno;

        spin_lock_init(&ring->lock);

        ring->size = RX_RING_SIZE;
        ring->ring_info = kzalloc(sizeof(struct pasemi_mac_buffer) *
                                  RX_RING_SIZE, GFP_KERNEL);

        if (!ring->ring_info)
                goto out_ring_info;

        /* Allocate descriptors */
        if (pasemi_dma_alloc_ring(&ring->chan, RX_RING_SIZE))
                goto out_ring_desc;

        ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev,
                                           RX_RING_SIZE * sizeof(u64),
                                           &ring->buf_dma, GFP_KERNEL);
        if (!ring->buffers)
                goto out_ring_desc;

        memset(ring->buffers, 0, RX_RING_SIZE * sizeof(u64));

        write_dma_reg(PAS_DMA_RXCHAN_BASEL(chno),
                      PAS_DMA_RXCHAN_BASEL_BRBL(ring->chan.ring_dma));

        write_dma_reg(PAS_DMA_RXCHAN_BASEU(chno),
                      PAS_DMA_RXCHAN_BASEU_BRBH(ring->chan.ring_dma >> 32) |
                      PAS_DMA_RXCHAN_BASEU_SIZ(RX_RING_SIZE >> 3));

        cfg = PAS_DMA_RXCHAN_CFG_HBU(1);

        if (translation_enabled())
                cfg |= PAS_DMA_RXCHAN_CFG_CTR;

        write_dma_reg(PAS_DMA_RXCHAN_CFG(chno), cfg);

        write_dma_reg(PAS_DMA_RXINT_BASEL(mac->dma_if),
                      PAS_DMA_RXINT_BASEL_BRBL(ring->buf_dma));

        write_dma_reg(PAS_DMA_RXINT_BASEU(mac->dma_if),
                      PAS_DMA_RXINT_BASEU_BRBH(ring->buf_dma >> 32) |
                      PAS_DMA_RXINT_BASEU_SIZ(RX_RING_SIZE >> 3));

        cfg = PAS_DMA_RXINT_CFG_DHL(1) | PAS_DMA_RXINT_CFG_L2 |
              PAS_DMA_RXINT_CFG_LW | PAS_DMA_RXINT_CFG_RBP |
              PAS_DMA_RXINT_CFG_HEN;

        if (translation_enabled())
                cfg |= PAS_DMA_RXINT_CFG_ITRR | PAS_DMA_RXINT_CFG_ITR;

        write_dma_reg(PAS_DMA_RXINT_CFG(mac->dma_if), cfg);

        ring->next_to_fill = 0;
        ring->next_to_clean = 0;
        ring->mac = mac;
        mac->rx = ring;

        return 0;

out_ring_desc:
        kfree(ring->ring_info);
out_ring_info:
        pasemi_dma_free_chan(&ring->chan);
out_chan:
        return -ENOMEM;
}
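
/* Note the two rings set up above: the per-channel descriptor ring
 * (PAS_DMA_RXCHAN_*) that receive completions land in, and the
 * per-interface buffer ring (PAS_DMA_RXINT_*) that the hardware
 * fetches fresh receive buffer pointers from.
 */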

static struct pasemi_mac_txring *
pasemi_mac_setup_tx_resources(struct net_device *dev)
{
        struct pasemi_mac *mac = netdev_priv(dev);
        u32 val;
        struct pasemi_mac_txring *ring;
        unsigned int cfg;
        int chno;

        ring = pasemi_dma_alloc_chan(TXCHAN, sizeof(struct pasemi_mac_txring),
                                     offsetof(struct pasemi_mac_txring, chan));

        if (!ring) {
                dev_err(&mac->pdev->dev, "Can't allocate TX channel\n");
                goto out_chan;
        }

        chno = ring->chan.chno;

        spin_lock_init(&ring->lock);

        ring->size = TX_RING_SIZE;
        ring->ring_info = kzalloc(sizeof(struct pasemi_mac_buffer) *
                                  TX_RING_SIZE, GFP_KERNEL);
        if (!ring->ring_info)
                goto out_ring_info;

        /* Allocate descriptors */
        if (pasemi_dma_alloc_ring(&ring->chan, TX_RING_SIZE))
                goto out_ring_desc;

        write_dma_reg(PAS_DMA_TXCHAN_BASEL(chno),
                      PAS_DMA_TXCHAN_BASEL_BRBL(ring->chan.ring_dma));
        val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->chan.ring_dma >> 32);
        val |= PAS_DMA_TXCHAN_BASEU_SIZ(TX_RING_SIZE >> 3);

        write_dma_reg(PAS_DMA_TXCHAN_BASEU(chno), val);

        cfg = PAS_DMA_TXCHAN_CFG_TY_IFACE |
              PAS_DMA_TXCHAN_CFG_TATTR(mac->dma_if) |
              PAS_DMA_TXCHAN_CFG_UP |
              PAS_DMA_TXCHAN_CFG_WT(2);

        if (translation_enabled())
                cfg |= PAS_DMA_TXCHAN_CFG_TRD | PAS_DMA_TXCHAN_CFG_TRR;

        write_dma_reg(PAS_DMA_TXCHAN_CFG(chno), cfg);

        ring->next_to_fill = 0;
        ring->next_to_clean = 0;
        ring->mac = mac;

        return ring;

out_ring_desc:
        kfree(ring->ring_info);
out_ring_info:
        pasemi_dma_free_chan(&ring->chan);
out_chan:
        return NULL;
}

static void pasemi_mac_free_tx_resources(struct pasemi_mac *mac)
{
        struct pasemi_mac_txring *txring = tx_ring(mac);
        unsigned int i, j;
        struct pasemi_mac_buffer *info;
        dma_addr_t dmas[MAX_SKB_FRAGS+1];
        int freed;
        int start, limit;

        start = txring->next_to_clean;
        limit = txring->next_to_fill;

        /* Compensate for when fill has wrapped and clean has not */
        if (start > limit)
                limit += TX_RING_SIZE;

        for (i = start; i < limit; i += freed) {
                info = &txring->ring_info[(i+1) & (TX_RING_SIZE-1)];
                if (info->dma && info->skb) {
                        for (j = 0; j <= skb_shinfo(info->skb)->nr_frags; j++)
                                dmas[j] = txring->ring_info[(i+1+j) &
                                                (TX_RING_SIZE-1)].dma;
                        freed = pasemi_mac_unmap_tx_skb(mac, info->skb, dmas);
                } else
                        freed = 2;
        }

        kfree(txring->ring_info);
        pasemi_dma_free_chan(&txring->chan);
}

static void pasemi_mac_free_rx_resources(struct pasemi_mac *mac)
{
        struct pasemi_mac_rxring *rx = rx_ring(mac);
        unsigned int i;
        struct pasemi_mac_buffer *info;

        for (i = 0; i < RX_RING_SIZE; i++) {
                info = &RX_DESC_INFO(rx, i);
                if (info->skb && info->dma) {
                        pci_unmap_single(mac->dma_pdev,
                                         info->dma,
                                         info->skb->len,
                                         PCI_DMA_FROMDEVICE);
                        dev_kfree_skb_any(info->skb);
                }
                info->dma = 0;
                info->skb = NULL;
        }

        for (i = 0; i < RX_RING_SIZE; i++)
                RX_DESC(rx, i) = 0;

        dma_free_coherent(&mac->dma_pdev->dev, RX_RING_SIZE * sizeof(u64),
                          rx_ring(mac)->buffers, rx_ring(mac)->buf_dma);

        kfree(rx_ring(mac)->ring_info);
        pasemi_dma_free_chan(&rx_ring(mac)->chan);
        mac->rx = NULL;
}

static void pasemi_mac_replenish_rx_ring(struct net_device *dev, int limit)
{
        struct pasemi_mac *mac = netdev_priv(dev);
        struct pasemi_mac_rxring *rx = rx_ring(mac);
        int fill, count;

        if (limit <= 0)
                return;

        fill = rx_ring(mac)->next_to_fill;
        for (count = 0; count < limit; count++) {
                struct pasemi_mac_buffer *info = &RX_DESC_INFO(rx, fill);
                u64 *buff = &RX_BUFF(rx, fill);
                struct sk_buff *skb;
                dma_addr_t dma;

                /* Entry in use? */
                WARN_ON(*buff);

                /* skb might still be in there for recycle on short receives */
                if (info->skb)
                        skb = info->skb;
                else {
                        skb = dev_alloc_skb(BUF_SIZE);
                        if (unlikely(!skb))
                                break;
                        skb_reserve(skb, LOCAL_SKB_ALIGN);
                }

                dma = pci_map_single(mac->dma_pdev, skb->data,
                                     BUF_SIZE - LOCAL_SKB_ALIGN,
                                     PCI_DMA_FROMDEVICE);

                if (unlikely(dma_mapping_error(dma))) {
                        dev_kfree_skb_irq(skb);
                        info->skb = NULL;
                        break;
                }

                info->skb = skb;
                info->dma = dma;
                *buff = XCT_RXB_LEN(BUF_SIZE) | XCT_RXB_ADDR(dma);
                fill++;
        }

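        /* Make sure the descriptor and buffer pointer writes above are
         * visible before the updated count is handed to the hardware below.
         */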
        wmb();

        write_dma_reg(PAS_DMA_RXINT_INCR(mac->dma_if), count);

        rx_ring(mac)->next_to_fill = (rx_ring(mac)->next_to_fill + count) &
                                (RX_RING_SIZE - 1);
}

static void pasemi_mac_restart_rx_intr(struct pasemi_mac *mac)
{
        unsigned int reg, pcnt;
        /* Re-enable packet count interrupts: finally
         * ack the packet count interrupt we got in rx_intr.
         */

        pcnt = *rx_ring(mac)->chan.status & PAS_STATUS_PCNT_M;

        reg = PAS_IOB_DMA_RXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_RXCH_RESET_PINTC;

        write_iob_reg(PAS_IOB_DMA_RXCH_RESET(mac->rx->chan.chno), reg);
}

static void pasemi_mac_restart_tx_intr(struct pasemi_mac *mac)
{
        unsigned int reg, pcnt;

        /* Re-enable packet count interrupts */
        pcnt = *tx_ring(mac)->chan.status & PAS_STATUS_PCNT_M;

        reg = PAS_IOB_DMA_TXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_TXCH_RESET_PINTC;

        write_iob_reg(PAS_IOB_DMA_TXCH_RESET(tx_ring(mac)->chan.chno), reg);
}


static inline void pasemi_mac_rx_error(struct pasemi_mac *mac, u64 macrx)
{
        unsigned int rcmdsta, ccmdsta;
        struct pasemi_dmachan *chan = &rx_ring(mac)->chan;

        if (!netif_msg_rx_err(mac))
                return;

        rcmdsta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
        ccmdsta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(chan->chno));

        printk(KERN_ERR "pasemi_mac: rx error. macrx %016lx, rx status %lx\n",
                macrx, *chan->status);

        printk(KERN_ERR "pasemi_mac: rcmdsta %08x ccmdsta %08x\n",
                rcmdsta, ccmdsta);
}

static inline void pasemi_mac_tx_error(struct pasemi_mac *mac, u64 mactx)
{
        unsigned int cmdsta;
        struct pasemi_dmachan *chan = &tx_ring(mac)->chan;

        if (!netif_msg_tx_err(mac))
                return;

        cmdsta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(chan->chno));

        printk(KERN_ERR "pasemi_mac: tx error. mactx 0x%016lx, "
                "tx status 0x%016lx\n", mactx, *chan->status);

        printk(KERN_ERR "pasemi_mac: tcmdsta 0x%08x\n", cmdsta);
}

static int pasemi_mac_clean_rx(struct pasemi_mac_rxring *rx, int limit)
{
        struct pasemi_dmachan *chan = &rx->chan;
        struct pasemi_mac *mac = rx->mac;
        unsigned int n;
        int count;
        struct pasemi_mac_buffer *info;
        struct sk_buff *skb;
        unsigned int len;
        u64 macrx;
        dma_addr_t dma;
        int buf_index;
        u64 eval;

        spin_lock(&rx->lock);

        n = rx->next_to_clean;

        prefetch(&RX_DESC(rx, n));

        for (count = 0; count < limit; count++) {
                macrx = RX_DESC(rx, n);

                if ((macrx & XCT_MACRX_E) ||
                    (*chan->status & PAS_STATUS_ERROR))
                        pasemi_mac_rx_error(mac, macrx);

                if (!(macrx & XCT_MACRX_O))
                        break;

                info = NULL;

                BUG_ON(!(macrx & XCT_MACRX_RR_8BRES));

                eval = (RX_DESC(rx, n+1) & XCT_RXRES_8B_EVAL_M) >>
                        XCT_RXRES_8B_EVAL_S;
                buf_index = eval-1;

                dma = (RX_DESC(rx, n+2) & XCT_PTR_ADDR_M);
                info = &RX_DESC_INFO(rx, buf_index);

                skb = info->skb;

                prefetch(skb);
                prefetch(&skb->data_len);

                len = (macrx & XCT_MACRX_LLEN_M) >> XCT_MACRX_LLEN_S;

                pci_unmap_single(mac->dma_pdev, dma, len, PCI_DMA_FROMDEVICE);

                if (macrx & XCT_MACRX_CRC) {
                        /* CRC error flagged */
                        mac->netdev->stats.rx_errors++;
                        mac->netdev->stats.rx_crc_errors++;
                        /* No need to free skb, it'll be reused */
                        goto next;
                }

                if (len < 256) {
                        struct sk_buff *new_skb;

                        new_skb = netdev_alloc_skb(mac->netdev,
                                                   len + LOCAL_SKB_ALIGN);
                        if (new_skb) {
                                skb_reserve(new_skb, LOCAL_SKB_ALIGN);
                                memcpy(new_skb->data, skb->data, len);
                                /* save the skb in buffer_info as good */
                                skb = new_skb;
                        }
                        /* else just continue with the old one */
                } else
                        info->skb = NULL;

                info->dma = 0;

                /* Don't include CRC */
                skb_put(skb, len-4);

                if (likely((macrx & XCT_MACRX_HTY_M) == XCT_MACRX_HTY_IPV4_OK)) {
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        skb->csum = (macrx & XCT_MACRX_CSUM_M) >>
                                           XCT_MACRX_CSUM_S;
                } else
                        skb->ip_summed = CHECKSUM_NONE;

                mac->netdev->stats.rx_bytes += len;
                mac->netdev->stats.rx_packets++;

                skb->protocol = eth_type_trans(skb, mac->netdev);
                netif_receive_skb(skb);

next:
                RX_DESC(rx, n) = 0;
                RX_DESC(rx, n+1) = 0;

                /* Zero this out ourselves, since the hardware doesn't;
                 * the replenish loop uses it to tell when a slot is free.
                 */
                RX_BUFF(rx, buf_index) = 0;

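                /* Each completed packet occupies four 8-byte ring entries:
                 * the MACRX status word, the 8-byte result, the buffer
                 * pointer and a pad entry, hence the stride of 4 here and
                 * the count << 1 (16-byte units) INCR write below.
                 */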
                n += 4;
        }

        if (n > RX_RING_SIZE) {
                /* Errata 5971 workaround: L2 target of headers */
                write_iob_reg(PAS_IOB_COM_PKTHDRCNT, 0);
                n &= (RX_RING_SIZE-1);
        }

        rx_ring(mac)->next_to_clean = n;

        /* Increase is in number of 16-byte entries, and since each descriptor
         * with an 8BRES takes up 3x8 bytes (padded to 4x8), increase with
         * count*2.
         */
        write_dma_reg(PAS_DMA_RXCHAN_INCR(mac->rx->chan.chno), count << 1);

        pasemi_mac_replenish_rx_ring(mac->netdev, count);

        spin_unlock(&rx_ring(mac)->lock);

        return count;
}

/* Can't make this too large or we blow the kernel stack limits */
#define TX_CLEAN_BATCHSIZE (128/MAX_SKB_FRAGS)
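
/* With MAX_SKB_FRAGS+1 dma_addr_t slots per cleaned packet, this keeps
 * the on-stack dmas[][] array below at roughly 128 entries (about 1KB).
 */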

static int pasemi_mac_clean_tx(struct pasemi_mac_txring *txring)
{
        struct pasemi_dmachan *chan = &txring->chan;
        struct pasemi_mac *mac = txring->mac;
        int i, j;
        unsigned int start, descr_count, buf_count, batch_limit;
        unsigned int ring_limit;
        unsigned int total_count;
        unsigned long flags;
        struct sk_buff *skbs[TX_CLEAN_BATCHSIZE];
        dma_addr_t dmas[TX_CLEAN_BATCHSIZE][MAX_SKB_FRAGS+1];

        total_count = 0;
        batch_limit = TX_CLEAN_BATCHSIZE;
restart:
        spin_lock_irqsave(&txring->lock, flags);

        start = txring->next_to_clean;
        ring_limit = txring->next_to_fill;

        /* Compensate for when fill has wrapped but clean has not */
        if (start > ring_limit)
                ring_limit += TX_RING_SIZE;

        buf_count = 0;
        descr_count = 0;

        for (i = start;
             descr_count < batch_limit && i < ring_limit;
             i += buf_count) {
                u64 mactx = TX_DESC(txring, i);
                struct sk_buff *skb;

                if ((mactx & XCT_MACTX_E) ||
                    (*chan->status & PAS_STATUS_ERROR))
                        pasemi_mac_tx_error(mac, mactx);

                if (unlikely(mactx & XCT_MACTX_O))
                        /* Not yet transmitted */
                        break;

                skb = TX_DESC_INFO(txring, i+1).skb;
                skbs[descr_count] = skb;

                buf_count = 2 + skb_shinfo(skb)->nr_frags;
                for (j = 0; j <= skb_shinfo(skb)->nr_frags; j++)
                        dmas[descr_count][j] = TX_DESC_INFO(txring, i+1+j).dma;

                TX_DESC(txring, i) = 0;
                TX_DESC(txring, i+1) = 0;

                /* Since we always fill with an even number of entries, make
                 * sure we skip any unused one at the end as well.
                 */
                if (buf_count & 1)
                        buf_count++;
                descr_count++;
        }
        txring->next_to_clean = i & (TX_RING_SIZE-1);

        spin_unlock_irqrestore(&txring->lock, flags);
        netif_wake_queue(mac->netdev);

        for (i = 0; i < descr_count; i++)
                pasemi_mac_unmap_tx_skb(mac, skbs[i], dmas[i]);

        total_count += descr_count;

        /* If the batch was full, try to clean more */
        if (descr_count == batch_limit)
                goto restart;

        return total_count;
}


static irqreturn_t pasemi_mac_rx_intr(int irq, void *data)
{
        struct pasemi_mac_rxring *rxring = data;
        struct pasemi_mac *mac = rxring->mac;
        struct net_device *dev = mac->netdev;
        struct pasemi_dmachan *chan = &rxring->chan;
        unsigned int reg;

        if (!(*chan->status & PAS_STATUS_CAUSE_M))
                return IRQ_NONE;

        /* Don't reset packet count so it won't fire again but clear
         * all others.
         */

        reg = 0;
        if (*chan->status & PAS_STATUS_SOFT)
                reg |= PAS_IOB_DMA_RXCH_RESET_SINTC;
        if (*chan->status & PAS_STATUS_ERROR)
                reg |= PAS_IOB_DMA_RXCH_RESET_DINTC;
        if (*chan->status & PAS_STATUS_TIMER)
                reg |= PAS_IOB_DMA_RXCH_RESET_TINTC;

        netif_rx_schedule(dev, &mac->napi);

        write_iob_reg(PAS_IOB_DMA_RXCH_RESET(chan->chno), reg);

        return IRQ_HANDLED;
}

static irqreturn_t pasemi_mac_tx_intr(int irq, void *data)
{
        struct pasemi_mac_txring *txring = data;
        struct pasemi_dmachan *chan = &txring->chan;
        unsigned int reg, pcnt;

        if (!(*chan->status & PAS_STATUS_CAUSE_M))
                return IRQ_NONE;

        pasemi_mac_clean_tx(txring);

        pcnt = *chan->status & PAS_STATUS_PCNT_M;

        reg = PAS_IOB_DMA_TXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_TXCH_RESET_PINTC;

        if (*chan->status & PAS_STATUS_SOFT)
                reg |= PAS_IOB_DMA_TXCH_RESET_SINTC;
        if (*chan->status & PAS_STATUS_ERROR)
                reg |= PAS_IOB_DMA_TXCH_RESET_DINTC;

        write_iob_reg(PAS_IOB_DMA_TXCH_RESET(chan->chno), reg);

        return IRQ_HANDLED;
}

static void pasemi_adjust_link(struct net_device *dev)
{
        struct pasemi_mac *mac = netdev_priv(dev);
        int msg;
        unsigned int flags;
        unsigned int new_flags;

        if (!mac->phydev->link) {
                /* If no link, MAC speed settings don't matter. Just report
                 * link down and return.
                 */
                if (mac->link && netif_msg_link(mac))
                        printk(KERN_INFO "%s: Link is down.\n", dev->name);

                netif_carrier_off(dev);
                mac->link = 0;

                return;
        } else
                netif_carrier_on(dev);

        flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);
        new_flags = flags & ~(PAS_MAC_CFG_PCFG_HD | PAS_MAC_CFG_PCFG_SPD_M |
                              PAS_MAC_CFG_PCFG_TSR_M);

        if (!mac->phydev->duplex)
                new_flags |= PAS_MAC_CFG_PCFG_HD;

        switch (mac->phydev->speed) {
        case 1000:
                new_flags |= PAS_MAC_CFG_PCFG_SPD_1G |
                             PAS_MAC_CFG_PCFG_TSR_1G;
                break;
        case 100:
                new_flags |= PAS_MAC_CFG_PCFG_SPD_100M |
                             PAS_MAC_CFG_PCFG_TSR_100M;
                break;
        case 10:
                new_flags |= PAS_MAC_CFG_PCFG_SPD_10M |
                             PAS_MAC_CFG_PCFG_TSR_10M;
                break;
        default:
                printk(KERN_WARNING "%s: Unsupported speed %d\n",
                       dev->name, mac->phydev->speed);
        }

        /* Print on link or speed/duplex change */
        msg = mac->link != mac->phydev->link || flags != new_flags;

        mac->duplex = mac->phydev->duplex;
        mac->speed = mac->phydev->speed;
        mac->link = mac->phydev->link;

        if (new_flags != flags)
                write_mac_reg(mac, PAS_MAC_CFG_PCFG, new_flags);

        if (msg && netif_msg_link(mac))
                printk(KERN_INFO "%s: Link is up at %d Mbps, %s duplex.\n",
                       dev->name, mac->speed, mac->duplex ? "full" : "half");
}

static int pasemi_mac_phy_init(struct net_device *dev)
{
        struct pasemi_mac *mac = netdev_priv(dev);
        struct device_node *dn, *phy_dn;
        struct phy_device *phydev;
        unsigned int phy_id;
        const phandle *ph;
        const unsigned int *prop;
        struct resource r;
        int ret;

        dn = pci_device_to_OF_node(mac->pdev);
        ph = of_get_property(dn, "phy-handle", NULL);
        if (!ph)
                return -ENODEV;
        phy_dn = of_find_node_by_phandle(*ph);

        prop = of_get_property(phy_dn, "reg", NULL);
        ret = of_address_to_resource(phy_dn->parent, 0, &r);
        if (ret)
                goto err;

        phy_id = *prop;
        snprintf(mac->phy_id, BUS_ID_SIZE, PHY_ID_FMT, (int)r.start, phy_id);

        of_node_put(phy_dn);

        mac->link = 0;
        mac->speed = 0;
        mac->duplex = -1;

        phydev = phy_connect(dev, mac->phy_id, &pasemi_adjust_link, 0,
                             PHY_INTERFACE_MODE_SGMII);

        if (IS_ERR(phydev)) {
                printk(KERN_ERR "%s: Could not attach to phy\n", dev->name);
                return PTR_ERR(phydev);
        }

        mac->phydev = phydev;

        return 0;

err:
        of_node_put(phy_dn);
        return -ENODEV;
}


static int pasemi_mac_open(struct net_device *dev)
{
        struct pasemi_mac *mac = netdev_priv(dev);
        unsigned int flags;
        int ret;

        /* enable rx section */
        write_dma_reg(PAS_DMA_COM_RXCMD, PAS_DMA_COM_RXCMD_EN);

        /* enable tx section */
        write_dma_reg(PAS_DMA_COM_TXCMD, PAS_DMA_COM_TXCMD_EN);

        flags = PAS_MAC_CFG_TXP_FCE | PAS_MAC_CFG_TXP_FPC(3) |
                PAS_MAC_CFG_TXP_SL(3) | PAS_MAC_CFG_TXP_COB(0xf) |
                PAS_MAC_CFG_TXP_TIFT(8) | PAS_MAC_CFG_TXP_TIFG(12);

        write_mac_reg(mac, PAS_MAC_CFG_TXP, flags);

        /* 0xffffff is max value, about 16ms */
        write_iob_reg(PAS_IOB_DMA_COM_TIMEOUTCFG,
                      PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(0xffffff));

        ret = pasemi_mac_setup_rx_resources(dev);
        if (ret)
                goto out_rx_resources;

        mac->tx = pasemi_mac_setup_tx_resources(dev);
        if (!mac->tx) {
                ret = -ENOMEM;
                goto out_tx_ring;
        }

        write_iob_reg(PAS_IOB_DMA_RXCH_CFG(mac->rx->chan.chno),
                      PAS_IOB_DMA_RXCH_CFG_CNTTH(0));

        write_iob_reg(PAS_IOB_DMA_TXCH_CFG(mac->tx->chan.chno),
                      PAS_IOB_DMA_TXCH_CFG_CNTTH(128));

        write_mac_reg(mac, PAS_MAC_IPC_CHNL,
                      PAS_MAC_IPC_CHNL_DCHNO(mac->rx->chan.chno) |
                      PAS_MAC_IPC_CHNL_BCH(mac->rx->chan.chno));

        /* enable rx if */
        write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
                      PAS_DMA_RXINT_RCMDSTA_EN |
                      PAS_DMA_RXINT_RCMDSTA_DROPS_M |
                      PAS_DMA_RXINT_RCMDSTA_BP |
                      PAS_DMA_RXINT_RCMDSTA_OO |
                      PAS_DMA_RXINT_RCMDSTA_BT);

        /* enable rx channel */
        pasemi_dma_start_chan(&rx_ring(mac)->chan, PAS_DMA_RXCHAN_CCMDSTA_DU |
                                                   PAS_DMA_RXCHAN_CCMDSTA_OD |
                                                   PAS_DMA_RXCHAN_CCMDSTA_FD |
                                                   PAS_DMA_RXCHAN_CCMDSTA_DT);

        /* enable tx channel */
        pasemi_dma_start_chan(&tx_ring(mac)->chan, PAS_DMA_TXCHAN_TCMDSTA_SZ |
                                                   PAS_DMA_TXCHAN_TCMDSTA_DB |
                                                   PAS_DMA_TXCHAN_TCMDSTA_DE |
                                                   PAS_DMA_TXCHAN_TCMDSTA_DA);

        pasemi_mac_replenish_rx_ring(dev, RX_RING_SIZE);

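        /* The INCR register counts 16-byte (two-entry) units, so a fully
         * replenished ring of RX_RING_SIZE 8-byte entries is RX_RING_SIZE/2.
         */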
        write_dma_reg(PAS_DMA_RXCHAN_INCR(rx_ring(mac)->chan.chno),
                      RX_RING_SIZE>>1);

        /* Clear out any residual packet count state from firmware */
        pasemi_mac_restart_rx_intr(mac);
        pasemi_mac_restart_tx_intr(mac);

        flags = PAS_MAC_CFG_PCFG_S1 | PAS_MAC_CFG_PCFG_PE |
                PAS_MAC_CFG_PCFG_PR | PAS_MAC_CFG_PCFG_CE;

        if (mac->type == MAC_TYPE_GMAC)
                flags |= PAS_MAC_CFG_PCFG_TSR_1G | PAS_MAC_CFG_PCFG_SPD_1G;
        else
                flags |= PAS_MAC_CFG_PCFG_TSR_10G | PAS_MAC_CFG_PCFG_SPD_10G;

        /* Enable interface in MAC */
        write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);

        ret = pasemi_mac_phy_init(dev);
        /* Some configs don't have PHYs (XAUI etc), so don't complain about
         * failed init due to -ENODEV.
         */
        if (ret && ret != -ENODEV)
                dev_warn(&mac->pdev->dev, "phy init failed: %d\n", ret);

        netif_start_queue(dev);
        napi_enable(&mac->napi);

        snprintf(mac->tx_irq_name, sizeof(mac->tx_irq_name), "%s tx",
                 dev->name);

        ret = request_irq(mac->tx->chan.irq, &pasemi_mac_tx_intr, IRQF_DISABLED,
                          mac->tx_irq_name, mac->tx);
        if (ret) {
                dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
                        mac->tx->chan.irq, ret);
                goto out_tx_int;
        }

        snprintf(mac->rx_irq_name, sizeof(mac->rx_irq_name), "%s rx",
                 dev->name);

        ret = request_irq(mac->rx->chan.irq, &pasemi_mac_rx_intr, IRQF_DISABLED,
                          mac->rx_irq_name, mac->rx);
        if (ret) {
                dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
                        mac->rx->chan.irq, ret);
                goto out_rx_int;
        }

        if (mac->phydev)
                phy_start(mac->phydev);

        return 0;

out_rx_int:
        free_irq(mac->tx->chan.irq, mac->tx);
out_tx_int:
        napi_disable(&mac->napi);
        netif_stop_queue(dev);
out_tx_ring:
        if (mac->tx)
                pasemi_mac_free_tx_resources(mac);
        pasemi_mac_free_rx_resources(mac);
out_rx_resources:

        return ret;
}

#define MAX_RETRIES 5000

static int pasemi_mac_close(struct net_device *dev)
{
        struct pasemi_mac *mac = netdev_priv(dev);
        unsigned int sta;
        int retries;
        int rxch, txch;

        rxch = rx_ring(mac)->chan.chno;
        txch = tx_ring(mac)->chan.chno;

        if (mac->phydev) {
                phy_stop(mac->phydev);
                phy_disconnect(mac->phydev);
        }

        netif_stop_queue(dev);
        napi_disable(&mac->napi);

        sta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
        if (sta & (PAS_DMA_RXINT_RCMDSTA_BP |
                      PAS_DMA_RXINT_RCMDSTA_OO |
                      PAS_DMA_RXINT_RCMDSTA_BT))
                printk(KERN_DEBUG "pasemi_mac: rcmdsta error: 0x%08x\n", sta);

        sta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch));
        if (sta & (PAS_DMA_RXCHAN_CCMDSTA_DU |
                     PAS_DMA_RXCHAN_CCMDSTA_OD |
                     PAS_DMA_RXCHAN_CCMDSTA_FD |
                     PAS_DMA_RXCHAN_CCMDSTA_DT))
                printk(KERN_DEBUG "pasemi_mac: ccmdsta error: 0x%08x\n", sta);

        sta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch));
        if (sta & (PAS_DMA_TXCHAN_TCMDSTA_SZ | PAS_DMA_TXCHAN_TCMDSTA_DB |
                      PAS_DMA_TXCHAN_TCMDSTA_DE | PAS_DMA_TXCHAN_TCMDSTA_DA))
                printk(KERN_DEBUG "pasemi_mac: tcmdsta error: 0x%08x\n", sta);

        /* Clean out any pending buffers */
        pasemi_mac_clean_tx(tx_ring(mac));
        pasemi_mac_clean_rx(rx_ring(mac), RX_RING_SIZE);

        /* Disable interface */
        write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch),
                      PAS_DMA_TXCHAN_TCMDSTA_ST);
        write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
                      PAS_DMA_RXINT_RCMDSTA_ST);
        write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch),
                      PAS_DMA_RXCHAN_CCMDSTA_ST);

        for (retries = 0; retries < MAX_RETRIES; retries++) {
                sta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch));
                if (!(sta & PAS_DMA_TXCHAN_TCMDSTA_ACT))
                        break;
                cond_resched();
        }

        if (sta & PAS_DMA_TXCHAN_TCMDSTA_ACT)
                dev_err(&mac->dma_pdev->dev, "Failed to stop tx channel\n");

        for (retries = 0; retries < MAX_RETRIES; retries++) {
                sta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch));
                if (!(sta & PAS_DMA_RXCHAN_CCMDSTA_ACT))
                        break;
                cond_resched();
        }

        if (sta & PAS_DMA_RXCHAN_CCMDSTA_ACT)
                dev_err(&mac->dma_pdev->dev, "Failed to stop rx channel\n");

        for (retries = 0; retries < MAX_RETRIES; retries++) {
                sta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
                if (!(sta & PAS_DMA_RXINT_RCMDSTA_ACT))
                        break;
                cond_resched();
        }

        if (sta & PAS_DMA_RXINT_RCMDSTA_ACT)
                dev_err(&mac->dma_pdev->dev, "Failed to stop rx interface\n");

        /* Then, disable the channel. This must be done separately from
         * stopping, since you can't disable when active.
         */

        write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch), 0);
        write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch), 0);
        write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 0);

        free_irq(mac->tx->chan.irq, mac->tx);
        free_irq(mac->rx->chan.irq, mac->rx);

        /* Free resources */
        pasemi_mac_free_rx_resources(mac);
        pasemi_mac_free_tx_resources(mac);

        return 0;
}

static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
{
        struct pasemi_mac *mac = netdev_priv(dev);
        struct pasemi_mac_txring *txring;
        u64 dflags, mactx;
        dma_addr_t map[MAX_SKB_FRAGS+1];
        unsigned int map_size[MAX_SKB_FRAGS+1];
        unsigned long flags;
        int i, nfrags;

        dflags = XCT_MACTX_O | XCT_MACTX_ST | XCT_MACTX_CRC_PAD;

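        /* For CHECKSUM_PARTIAL packets, hand the MAC the IP header length
         * (in 32-bit words) and its offset into the frame so the hardware
         * can insert the TCP/UDP checksum itself.
         */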
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                const unsigned char *nh = skb_network_header(skb);

                switch (ip_hdr(skb)->protocol) {
                case IPPROTO_TCP:
                        dflags |= XCT_MACTX_CSUM_TCP;
                        dflags |= XCT_MACTX_IPH(skb_network_header_len(skb) >> 2);
                        dflags |= XCT_MACTX_IPO(nh - skb->data);
                        break;
                case IPPROTO_UDP:
                        dflags |= XCT_MACTX_CSUM_UDP;
                        dflags |= XCT_MACTX_IPH(skb_network_header_len(skb) >> 2);
                        dflags |= XCT_MACTX_IPO(nh - skb->data);
                        break;
                }
        }

        nfrags = skb_shinfo(skb)->nr_frags;

        map[0] = pci_map_single(mac->dma_pdev, skb->data, skb_headlen(skb),
                                PCI_DMA_TODEVICE);
        map_size[0] = skb_headlen(skb);
        if (dma_mapping_error(map[0])) {
                /* Nothing mapped yet, nothing to unwind */
                nfrags = 0;
                goto out_err_nolock;
        }

        for (i = 0; i < nfrags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                map[i+1] = pci_map_page(mac->dma_pdev, frag->page,
                                        frag->page_offset, frag->size,
                                        PCI_DMA_TODEVICE);
                map_size[i+1] = frag->size;
                if (dma_mapping_error(map[i+1])) {
                        /* Unwind map[0..i], the entries mapped so far */
                        nfrags = i + 1;
                        goto out_err_nolock;
                }
        }

        mactx = dflags | XCT_MACTX_LLEN(skb->len);

        txring = tx_ring(mac);

        spin_lock_irqsave(&txring->lock, flags);

        /* Avoid stepping on the same cache line that the DMA controller
         * is currently about to send, so leave at least 8 words available.
         * Total free space needed is mactx + fragments + 8
         */
        if (RING_AVAIL(txring) < nfrags + 10) {
                /* no room -- stop the queue and wait for tx intr */
                netif_stop_queue(dev);
                goto out_err;
        }

        TX_DESC(txring, txring->next_to_fill) = mactx;
        txring->next_to_fill++;
        TX_DESC_INFO(txring, txring->next_to_fill).skb = skb;
        for (i = 0; i <= nfrags; i++) {
                TX_DESC(txring, txring->next_to_fill+i) =
                        XCT_PTR_LEN(map_size[i]) | XCT_PTR_ADDR(map[i]);
                TX_DESC_INFO(txring, txring->next_to_fill+i).dma = map[i];
        }

        /* We have to add an even number of 8-byte entries to the ring
         * even if the last one is unused. That means always an odd number
         * of pointers + one mactx descriptor.
         */
        if (nfrags & 1)
                nfrags++;

        txring->next_to_fill = (txring->next_to_fill + nfrags + 1) &
                                (TX_RING_SIZE-1);

        dev->stats.tx_packets++;
        dev->stats.tx_bytes += skb->len;

        spin_unlock_irqrestore(&txring->lock, flags);

        write_dma_reg(PAS_DMA_TXCHAN_INCR(txring->chan.chno), (nfrags+2) >> 1);

        return NETDEV_TX_OK;

out_err:
        spin_unlock_irqrestore(&txring->lock, flags);
        /* All of map[0..nfrags] is mapped at this point */
        nfrags++;
out_err_nolock:
        while (nfrags--)
                pci_unmap_single(mac->dma_pdev, map[nfrags], map_size[nfrags],
                                 PCI_DMA_TODEVICE);

        return NETDEV_TX_BUSY;
}

static void pasemi_mac_set_rx_mode(struct net_device *dev)
{
        struct pasemi_mac *mac = netdev_priv(dev);
        unsigned int flags;

        flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);

        /* Set promiscuous */
        if (dev->flags & IFF_PROMISC)
                flags |= PAS_MAC_CFG_PCFG_PR;
        else
                flags &= ~PAS_MAC_CFG_PCFG_PR;

        write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);
}


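/* NAPI poll. TX completions are cleaned here opportunistically as well,
 * in addition to in the TX channel interrupt handler.
 */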
static int pasemi_mac_poll(struct napi_struct *napi, int budget)
{
        struct pasemi_mac *mac = container_of(napi, struct pasemi_mac, napi);
        struct net_device *dev = mac->netdev;
        int pkts;

        pasemi_mac_clean_tx(tx_ring(mac));
        pkts = pasemi_mac_clean_rx(rx_ring(mac), budget);
        if (pkts < budget) {
                /* all done, no more packets present */
                netif_rx_complete(dev, napi);

                pasemi_mac_restart_rx_intr(mac);
        }
        return pkts;
}

static int __devinit
pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct net_device *dev;
        struct pasemi_mac *mac;
        int err;
        DECLARE_MAC_BUF(mac_buf);

        err = pci_enable_device(pdev);
        if (err)
                return err;

        dev = alloc_etherdev(sizeof(struct pasemi_mac));
        if (dev == NULL) {
                dev_err(&pdev->dev,
                        "pasemi_mac: Could not allocate ethernet device.\n");
                err = -ENOMEM;
                goto out_disable_device;
        }

        pci_set_drvdata(pdev, dev);
        SET_NETDEV_DEV(dev, &pdev->dev);

        mac = netdev_priv(dev);

        mac->pdev = pdev;
        mac->netdev = dev;

        netif_napi_add(dev, &mac->napi, pasemi_mac_poll, 64);

        dev->features = NETIF_F_IP_CSUM | NETIF_F_LLTX | NETIF_F_SG;

        mac->dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL);
        if (!mac->dma_pdev) {
                dev_err(&mac->pdev->dev, "Can't find DMA Controller\n");
                err = -ENODEV;
                goto out;
        }

        mac->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);
        if (!mac->iob_pdev) {
                dev_err(&mac->pdev->dev, "Can't find I/O Bridge\n");
                err = -ENODEV;
                goto out;
        }

        /* get mac addr from device tree */
        if (pasemi_get_mac_addr(mac) || !is_valid_ether_addr(mac->mac_addr)) {
                err = -ENODEV;
                goto out;
        }
        memcpy(dev->dev_addr, mac->mac_addr, sizeof(mac->mac_addr));

        mac->dma_if = mac_to_intf(mac);
        if (mac->dma_if < 0) {
                dev_err(&mac->pdev->dev, "Can't map DMA interface\n");
                err = -ENODEV;
                goto out;
        }

        switch (pdev->device) {
        case 0xa005:
                mac->type = MAC_TYPE_GMAC;
                break;
        case 0xa006:
                mac->type = MAC_TYPE_XAUI;
                break;
        default:
                err = -ENODEV;
                goto out;
        }

        dev->open = pasemi_mac_open;
        dev->stop = pasemi_mac_close;
        dev->hard_start_xmit = pasemi_mac_start_tx;
        dev->set_multicast_list = pasemi_mac_set_rx_mode;

        mac->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

        err = register_netdev(dev);

        if (err) {
                dev_err(&mac->pdev->dev, "register_netdev failed with error %d\n",
                        err);
                goto out;
        } else if (netif_msg_probe(mac))
                printk(KERN_INFO "%s: PA Semi %s: intf %d, hw addr %s\n",
                       dev->name, mac->type == MAC_TYPE_GMAC ? "GMAC" : "XAUI",
                       mac->dma_if, print_mac(mac_buf, dev->dev_addr));

        return err;

out:
        if (mac->iob_pdev)
                pci_dev_put(mac->iob_pdev);
        if (mac->dma_pdev)
                pci_dev_put(mac->dma_pdev);

        free_netdev(dev);
out_disable_device:
        pci_disable_device(pdev);
        return err;
}

static void __devexit pasemi_mac_remove(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct pasemi_mac *mac;

        if (!netdev)
                return;

        mac = netdev_priv(netdev);

        unregister_netdev(netdev);

        pci_disable_device(pdev);
        pci_dev_put(mac->dma_pdev);
        pci_dev_put(mac->iob_pdev);

        pasemi_dma_free_chan(&mac->tx->chan);
        pasemi_dma_free_chan(&mac->rx->chan);

        pci_set_drvdata(pdev, NULL);
        free_netdev(netdev);
}

static struct pci_device_id pasemi_mac_pci_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa005) },
        { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa006) },
        { },
};

MODULE_DEVICE_TABLE(pci, pasemi_mac_pci_tbl);

static struct pci_driver pasemi_mac_driver = {
        .name           = "pasemi_mac",
        .id_table       = pasemi_mac_pci_tbl,
        .probe          = pasemi_mac_probe,
        .remove         = __devexit_p(pasemi_mac_remove),
};

static void __exit pasemi_mac_cleanup_module(void)
{
        pci_unregister_driver(&pasemi_mac_driver);
}

static int __init pasemi_mac_init_module(void)
{
        int err;

        err = pasemi_dma_init();
        if (err)
                return err;

        return pci_register_driver(&pasemi_mac_driver);
}

module_init(pasemi_mac_init_module);
module_exit(pasemi_mac_cleanup_module);