gianfar: Fix a filer bug
[linux-2.6.git] / drivers/net/gianfar.c
1 /*
2  * drivers/net/gianfar.c
3  *
4  * Gianfar Ethernet Driver
5  * This driver is designed for the non-CPM ethernet controllers
6  * on the 85xx and 83xx family of integrated processors
7  * Based on 8260_io/fcc_enet.c
8  *
9  * Author: Andy Fleming
10  * Maintainer: Kumar Gala
11  * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
12  *
13  * Copyright 2002-2009 Freescale Semiconductor, Inc.
14  * Copyright 2007 MontaVista Software, Inc.
15  *
16  * This program is free software; you can redistribute  it and/or modify it
17  * under  the terms of  the GNU General  Public License as published by the
18  * Free Software Foundation;  either version 2 of the  License, or (at your
19  * option) any later version.
20  *
21  *  Gianfar:  AKA Lambda Draconis, "Dragon"
22  *  RA 11 31 24.2
23  *  Dec +69 19 52
24  *  V 3.84
25  *  B-V +1.62
26  *
27  *  Theory of operation
28  *
29  *  The driver is initialized through of_device. Configuration information
30  *  is therefore conveyed through an OF-style device tree.
31  *
32  *  The Gianfar Ethernet Controller uses a ring of buffer
33  *  descriptors.  The beginning is indicated by a register
34  *  pointing to the physical address of the start of the ring.
35  *  The end is determined by a "wrap" bit being set in the
36  *  last descriptor of the ring.
37  *
38  *  When a packet is received, the RXF bit in the
39  *  IEVENT register is set, triggering an interrupt when the
40  *  corresponding bit in the IMASK register is also set (if
41  *  interrupt coalescing is active, then the interrupt may not
42  *  happen immediately, but will wait until either a set number
43  *  of frames or a set amount of time has passed).  In NAPI, the
44  *  interrupt handler will signal there is work to be done, and
45  *  exit. This method will start at the last known empty
46  *  descriptor, and process every subsequent descriptor until there
47  *  are none left with data (NAPI will stop after a set number of
48  *  packets to give time to other tasks, but will eventually
49  *  process all the packets).  The data arrives inside a
50  *  pre-allocated skb, and so after the skb is passed up to the
51  *  stack, a new skb must be allocated, and the address field in
52  *  the buffer descriptor must be updated to indicate this new
53  *  skb.
54  *
55  *  When the kernel requests that a packet be transmitted, the
56  *  driver starts where it left off last time, and points the
57  *  descriptor at the buffer which was passed in.  The driver
58  *  then informs the DMA engine that there are packets ready to
59  *  be transmitted.  Once the controller is finished transmitting
60  *  the packet, an interrupt may be triggered (under the same
61  *  conditions as for reception, but depending on the TXF bit).
62  *  The driver then cleans up the buffer.
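 *
 *  A rough sketch of the receive path (illustrative only, not the exact
 *  code; names follow the descriptions above):
 *
 *      while (!(bdp->status & RXBD_EMPTY)) {
 *              pass bdp's skb up the stack;
 *              attach a newly allocated skb to bdp;
 *              mark bdp empty again and advance,
 *              wrapping when the RXBD_WRAP bit is seen;
 *      }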
63  */
64
65 #include <linux/kernel.h>
66 #include <linux/string.h>
67 #include <linux/errno.h>
68 #include <linux/unistd.h>
69 #include <linux/slab.h>
70 #include <linux/interrupt.h>
71 #include <linux/init.h>
72 #include <linux/delay.h>
73 #include <linux/netdevice.h>
74 #include <linux/etherdevice.h>
75 #include <linux/skbuff.h>
76 #include <linux/if_vlan.h>
77 #include <linux/spinlock.h>
78 #include <linux/mm.h>
79 #include <linux/of_mdio.h>
80 #include <linux/of_platform.h>
81 #include <linux/ip.h>
82 #include <linux/tcp.h>
83 #include <linux/udp.h>
84 #include <linux/in.h>
85
86 #include <asm/io.h>
87 #include <asm/irq.h>
88 #include <asm/uaccess.h>
89 #include <linux/module.h>
90 #include <linux/dma-mapping.h>
91 #include <linux/crc32.h>
92 #include <linux/mii.h>
93 #include <linux/phy.h>
94 #include <linux/phy_fixed.h>
95 #include <linux/of.h>
96
97 #include "gianfar.h"
98 #include "fsl_pq_mdio.h"
99
100 #define TX_TIMEOUT      (1*HZ)
101 #undef BRIEF_GFAR_ERRORS
102 #undef VERBOSE_GFAR_ERRORS
103
104 const char gfar_driver_name[] = "Gianfar Ethernet";
105 const char gfar_driver_version[] = "1.3";
106
107 static int gfar_enet_open(struct net_device *dev);
108 static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
109 static void gfar_reset_task(struct work_struct *work);
110 static void gfar_timeout(struct net_device *dev);
111 static int gfar_close(struct net_device *dev);
112 struct sk_buff *gfar_new_skb(struct net_device *dev);
113 static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
114                 struct sk_buff *skb);
115 static int gfar_set_mac_address(struct net_device *dev);
116 static int gfar_change_mtu(struct net_device *dev, int new_mtu);
117 static irqreturn_t gfar_error(int irq, void *dev_id);
118 static irqreturn_t gfar_transmit(int irq, void *dev_id);
119 static irqreturn_t gfar_interrupt(int irq, void *dev_id);
120 static void adjust_link(struct net_device *dev);
121 static void init_registers(struct net_device *dev);
122 static int init_phy(struct net_device *dev);
123 static int gfar_probe(struct of_device *ofdev,
124                 const struct of_device_id *match);
125 static int gfar_remove(struct of_device *ofdev);
126 static void free_skb_resources(struct gfar_private *priv);
127 static void gfar_set_multi(struct net_device *dev);
128 static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
129 static void gfar_configure_serdes(struct net_device *dev);
130 static int gfar_poll(struct napi_struct *napi, int budget);
131 #ifdef CONFIG_NET_POLL_CONTROLLER
132 static void gfar_netpoll(struct net_device *dev);
133 #endif
134 int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
135 static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
136 static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
137                               int amount_pull);
138 static void gfar_vlan_rx_register(struct net_device *netdev,
139                                 struct vlan_group *grp);
140 void gfar_halt(struct net_device *dev);
141 static void gfar_halt_nodisable(struct net_device *dev);
142 void gfar_start(struct net_device *dev);
143 static void gfar_clear_exact_match(struct net_device *dev);
144 static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);
145 static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
146 u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb);
147
148 MODULE_AUTHOR("Freescale Semiconductor, Inc");
149 MODULE_DESCRIPTION("Gianfar Ethernet Driver");
150 MODULE_LICENSE("GPL");
151
152 static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
153                             dma_addr_t buf)
154 {
155         u32 lstatus;
156
157         bdp->bufPtr = buf;
158
159         lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
160         if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
161                 lstatus |= BD_LFLAG(RXBD_WRAP);
162
163         eieio();
164
165         bdp->lstatus = lstatus;
166 }
167
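/* Initialize the Tx and Rx buffer descriptor rings: reset the ring state for
 * every queue, clear all Tx descriptors, and make sure every Rx descriptor
 * has an skb attached (allocating new ones where needed). */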
168 static int gfar_init_bds(struct net_device *ndev)
169 {
170         struct gfar_private *priv = netdev_priv(ndev);
171         struct gfar_priv_tx_q *tx_queue = NULL;
172         struct gfar_priv_rx_q *rx_queue = NULL;
173         struct txbd8 *txbdp;
174         struct rxbd8 *rxbdp;
175         int i, j;
176
177         for (i = 0; i < priv->num_tx_queues; i++) {
178                 tx_queue = priv->tx_queue[i];
179                 /* Initialize some variables in our dev structure */
180                 tx_queue->num_txbdfree = tx_queue->tx_ring_size;
181                 tx_queue->dirty_tx = tx_queue->tx_bd_base;
182                 tx_queue->cur_tx = tx_queue->tx_bd_base;
183                 tx_queue->skb_curtx = 0;
184                 tx_queue->skb_dirtytx = 0;
185
186                 /* Initialize Transmit Descriptor Ring */
187                 txbdp = tx_queue->tx_bd_base;
188                 for (j = 0; j < tx_queue->tx_ring_size; j++) {
189                         txbdp->lstatus = 0;
190                         txbdp->bufPtr = 0;
191                         txbdp++;
192                 }
193
194                 /* Set the last descriptor in the ring to indicate wrap */
195                 txbdp--;
196                 txbdp->status |= TXBD_WRAP;
197         }
198
199         for (i = 0; i < priv->num_rx_queues; i++) {
200                 rx_queue = priv->rx_queue[i];
201                 rx_queue->cur_rx = rx_queue->rx_bd_base;
202                 rx_queue->skb_currx = 0;
203                 rxbdp = rx_queue->rx_bd_base;
204
205                 for (j = 0; j < rx_queue->rx_ring_size; j++) {
206                         struct sk_buff *skb = rx_queue->rx_skbuff[j];
207
208                         if (skb) {
209                                 gfar_init_rxbdp(rx_queue, rxbdp,
210                                                 rxbdp->bufPtr);
211                         } else {
212                                 skb = gfar_new_skb(ndev);
213                                 if (!skb) {
214                                         pr_err("%s: Can't allocate RX buffers\n",
215                                                         ndev->name);
216                                         goto err_rxalloc_fail;
217                                 }
218                                 rx_queue->rx_skbuff[j] = skb;
219
220                                 gfar_new_rxbdp(rx_queue, rxbdp, skb);
221                         }
222
223                         rxbdp++;
224                 }
225
226         }
227
228         return 0;
229
230 err_rxalloc_fail:
231         free_skb_resources(priv);
232         return -ENOMEM;
233 }
234
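/* Allocate one DMA-coherent region holding all Tx and Rx descriptor rings,
 * carve it up per queue, allocate the per-queue skb pointer arrays, and
 * initialize the descriptors via gfar_init_bds(). */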
235 static int gfar_alloc_skb_resources(struct net_device *ndev)
236 {
237         void *vaddr;
238         dma_addr_t addr;
239         int i, j, k;
240         struct gfar_private *priv = netdev_priv(ndev);
241         struct device *dev = &priv->ofdev->dev;
242         struct gfar_priv_tx_q *tx_queue = NULL;
243         struct gfar_priv_rx_q *rx_queue = NULL;
244
245         priv->total_tx_ring_size = 0;
246         for (i = 0; i < priv->num_tx_queues; i++)
247                 priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;
248
249         priv->total_rx_ring_size = 0;
250         for (i = 0; i < priv->num_rx_queues; i++)
251                 priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;
252
253         /* Allocate memory for the buffer descriptors */
254         vaddr = dma_alloc_coherent(dev,
255                         sizeof(struct txbd8) * priv->total_tx_ring_size +
256                         sizeof(struct rxbd8) * priv->total_rx_ring_size,
257                         &addr, GFP_KERNEL);
258         if (!vaddr) {
259                 if (netif_msg_ifup(priv))
260                         pr_err("%s: Could not allocate buffer descriptors!\n",
261                                ndev->name);
262                 return -ENOMEM;
263         }
264
265         for (i = 0; i < priv->num_tx_queues; i++) {
266                 tx_queue = priv->tx_queue[i];
267                 tx_queue->tx_bd_base = (struct txbd8 *) vaddr;
268                 tx_queue->tx_bd_dma_base = addr;
269                 tx_queue->dev = ndev;
270                 /* enet DMA only understands physical addresses */
271                 addr    += sizeof(struct txbd8) * tx_queue->tx_ring_size;
272                 vaddr   += sizeof(struct txbd8) * tx_queue->tx_ring_size;
273         }
274
275         /* Start the rx descriptor ring where the tx ring leaves off */
276         for (i = 0; i < priv->num_rx_queues; i++) {
277                 rx_queue = priv->rx_queue[i];
278                 rx_queue->rx_bd_base = (struct rxbd8 *) vaddr;
279                 rx_queue->rx_bd_dma_base = addr;
280                 rx_queue->dev = ndev;
281                 addr    += sizeof (struct rxbd8) * rx_queue->rx_ring_size;
282                 vaddr   += sizeof (struct rxbd8) * rx_queue->rx_ring_size;
283         }
284
285         /* Setup the skbuff rings */
286         for (i = 0; i < priv->num_tx_queues; i++) {
287                 tx_queue = priv->tx_queue[i];
288                 tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
289                                   tx_queue->tx_ring_size, GFP_KERNEL);
290                 if (!tx_queue->tx_skbuff) {
291                         if (netif_msg_ifup(priv))
292                                 pr_err("%s: Could not allocate tx_skbuff\n",
293                                                 ndev->name);
294                         goto cleanup;
295                 }
296
297                 for (k = 0; k < tx_queue->tx_ring_size; k++)
298                         tx_queue->tx_skbuff[k] = NULL;
299         }
300
301         for (i = 0; i < priv->num_rx_queues; i++) {
302                 rx_queue = priv->rx_queue[i];
303                 rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
304                                   rx_queue->rx_ring_size, GFP_KERNEL);
305
306                 if (!rx_queue->rx_skbuff) {
307                         if (netif_msg_ifup(priv))
308                                 pr_err("%s: Could not allocate rx_skbuff\n",
309                                        ndev->name);
310                         goto cleanup;
311                 }
312
313                 for (j = 0; j < rx_queue->rx_ring_size; j++)
314                         rx_queue->rx_skbuff[j] = NULL;
315         }
316
317         if (gfar_init_bds(ndev))
318                 goto cleanup;
319
320         return 0;
321
322 cleanup:
323         free_skb_resources(priv);
324         return -ENOMEM;
325 }
326
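/* Program the TBASEn/RBASEn registers with the DMA address of each queue's
 * descriptor ring. */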
327 static void gfar_init_tx_rx_base(struct gfar_private *priv)
328 {
329         struct gfar __iomem *regs = priv->gfargrp[0].regs;
330         u32 __iomem *baddr;
331         int i;
332
333         baddr = &regs->tbase0;
334         for (i = 0; i < priv->num_tx_queues; i++) {
335                 gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
336                 baddr   += 2;
337         }
338
339         baddr = &regs->rbase0;
340         for (i = 0; i < priv->num_rx_queues; i++) {
341                 gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
342                 baddr   += 2;
343         }
344 }
345
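/* Configure MAC-level receive and transmit settings: descriptor ring base
 * registers, interrupt coalescing, RCTRL/TCTRL options (filer, checksumming,
 * hashing, padding, VLAN), stashing attributes and the Tx FIFO thresholds. */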
346 static void gfar_init_mac(struct net_device *ndev)
347 {
348         struct gfar_private *priv = netdev_priv(ndev);
349         struct gfar __iomem *regs = priv->gfargrp[0].regs;
350         u32 rctrl = 0;
351         u32 tctrl = 0;
352         u32 attrs = 0;
353
354         /* write the tx/rx base registers */
355         gfar_init_tx_rx_base(priv);
356
357         /* Configure the coalescing support */
358         gfar_configure_coalescing(priv, 0xFF, 0xFF);
359
360         if (priv->rx_filer_enable) {
361                 rctrl |= RCTRL_FILREN;
362                 /* Program the RIR0 reg with the required distribution */
363                 gfar_write(&regs->rir0, DEFAULT_RIR0);
364         }
365
366         if (priv->rx_csum_enable)
367                 rctrl |= RCTRL_CHECKSUMMING;
368
369         if (priv->extended_hash) {
370                 rctrl |= RCTRL_EXTHASH;
371
372                 gfar_clear_exact_match(ndev);
373                 rctrl |= RCTRL_EMEN;
374         }
375
376         if (priv->padding) {
377                 rctrl &= ~RCTRL_PAL_MASK;
378                 rctrl |= RCTRL_PADDING(priv->padding);
379         }
380
381         /* keep vlan related bits if it's enabled */
382         if (priv->vlgrp) {
383                 rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
384                 tctrl |= TCTRL_VLINS;
385         }
386
387         /* Init rctrl based on our settings */
388         gfar_write(&regs->rctrl, rctrl);
389
390         if (ndev->features & NETIF_F_IP_CSUM)
391                 tctrl |= TCTRL_INIT_CSUM;
392
393         tctrl |= TCTRL_TXSCHED_PRIO;
394
395         gfar_write(&regs->tctrl, tctrl);
396
397         /* Set the extraction length and index */
398         attrs = ATTRELI_EL(priv->rx_stash_size) |
399                 ATTRELI_EI(priv->rx_stash_index);
400
401         gfar_write(&regs->attreli, attrs);
402
403         /* Start with defaults, and add stashing or locking
404          * depending on the appropriate variables */
405         attrs = ATTR_INIT_SETTINGS;
406
407         if (priv->bd_stash_en)
408                 attrs |= ATTR_BDSTASH;
409
410         if (priv->rx_stash_size != 0)
411                 attrs |= ATTR_BUFSTASH;
412
413         gfar_write(&regs->attr, attrs);
414
415         gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
416         gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
417         gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
418 }
419
420 static const struct net_device_ops gfar_netdev_ops = {
421         .ndo_open = gfar_enet_open,
422         .ndo_start_xmit = gfar_start_xmit,
423         .ndo_stop = gfar_close,
424         .ndo_change_mtu = gfar_change_mtu,
425         .ndo_set_multicast_list = gfar_set_multi,
426         .ndo_tx_timeout = gfar_timeout,
427         .ndo_do_ioctl = gfar_ioctl,
428         .ndo_select_queue = gfar_select_queue,
429         .ndo_vlan_rx_register = gfar_vlan_rx_register,
430         .ndo_set_mac_address = eth_mac_addr,
431         .ndo_validate_addr = eth_validate_addr,
432 #ifdef CONFIG_NET_POLL_CONTROLLER
433         .ndo_poll_controller = gfar_netpoll,
434 #endif
435 };
436
437 unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
438 unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
439
440 void lock_rx_qs(struct gfar_private *priv)
441 {
442         int i = 0x0;
443
444         for (i = 0; i < priv->num_rx_queues; i++)
445                 spin_lock(&priv->rx_queue[i]->rxlock);
446 }
447
448 void lock_tx_qs(struct gfar_private *priv)
449 {
450         int i = 0x0;
451
452         for (i = 0; i < priv->num_tx_queues; i++)
453                 spin_lock(&priv->tx_queue[i]->txlock);
454 }
455
456 void unlock_rx_qs(struct gfar_private *priv)
457 {
458         int i = 0x0;
459
460         for (i = 0; i < priv->num_rx_queues; i++)
461                 spin_unlock(&priv->rx_queue[i]->rxlock);
462 }
463
464 void unlock_tx_qs(struct gfar_private *priv)
465 {
466         int i = 0x0;
467
468         for (i = 0; i < priv->num_tx_queues; i++)
469                 spin_unlock(&priv->tx_queue[i]->txlock);
470 }
471
472 /* Returns 1 if incoming frames use an FCB */
473 static inline int gfar_uses_fcb(struct gfar_private *priv)
474 {
475         return priv->vlgrp || priv->rx_csum_enable;
476 }
477
478 u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb)
479 {
480         return skb_get_queue_mapping(skb);
481 }
482 static void free_tx_pointers(struct gfar_private *priv)
483 {
484         int i = 0;
485
486         for (i = 0; i < priv->num_tx_queues; i++)
487                 kfree(priv->tx_queue[i]);
488 }
489
490 static void free_rx_pointers(struct gfar_private *priv)
491 {
492         int i = 0;
493
494         for (i = 0; i < priv->num_rx_queues; i++)
495                 kfree(priv->rx_queue[i]);
496 }
497
498 static void unmap_group_regs(struct gfar_private *priv)
499 {
500         int i = 0;
501
502         for (i = 0; i < MAXGROUPS; i++)
503                 if (priv->gfargrp[i].regs)
504                         iounmap(priv->gfargrp[i].regs);
505 }
506
507 static void disable_napi(struct gfar_private *priv)
508 {
509         int i = 0;
510
511         for (i = 0; i < priv->num_grps; i++)
512                 napi_disable(&priv->gfargrp[i].napi);
513 }
514
515 static void enable_napi(struct gfar_private *priv)
516 {
517         int i = 0;
518
519         for (i = 0; i < priv->num_grps; i++)
520                 napi_enable(&priv->gfargrp[i].napi);
521 }
522
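/* Map one register group described by the device tree: ioremap its register
 * block, parse its interrupts, and record which Rx/Tx queues it serves (from
 * the fsl,rx/tx-bit-map properties in multi-group mode, all queues otherwise). */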
523 static int gfar_parse_group(struct device_node *np,
524                 struct gfar_private *priv, const char *model)
525 {
526         u32 *queue_mask;
527         u64 addr, size;
528
529         addr = of_translate_address(np,
530                         of_get_address(np, 0, &size, NULL));
531         priv->gfargrp[priv->num_grps].regs = ioremap(addr, size);
532
533         if (!priv->gfargrp[priv->num_grps].regs)
534                 return -ENOMEM;
535
536         priv->gfargrp[priv->num_grps].interruptTransmit =
537                         irq_of_parse_and_map(np, 0);
538
539         /* If we aren't the FEC we have multiple interrupts */
540         if (model && strcasecmp(model, "FEC")) {
541                 priv->gfargrp[priv->num_grps].interruptReceive =
542                         irq_of_parse_and_map(np, 1);
543                 priv->gfargrp[priv->num_grps].interruptError =
544                         irq_of_parse_and_map(np, 2);
545                 if (priv->gfargrp[priv->num_grps].interruptTransmit < 0 ||
546                         priv->gfargrp[priv->num_grps].interruptReceive < 0 ||
547                         priv->gfargrp[priv->num_grps].interruptError < 0) {
548                         return -EINVAL;
549                 }
550         }
551
552         priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
553         priv->gfargrp[priv->num_grps].priv = priv;
554         spin_lock_init(&priv->gfargrp[priv->num_grps].grplock);
555         if (priv->mode == MQ_MG_MODE) {
556                 queue_mask = (u32 *)of_get_property(np,
557                                         "fsl,rx-bit-map", NULL);
558                 priv->gfargrp[priv->num_grps].rx_bit_map =
559                         queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
560                 queue_mask = (u32 *)of_get_property(np,
561                                         "fsl,tx-bit-map", NULL);
562                 priv->gfargrp[priv->num_grps].tx_bit_map =
563                         queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
564         } else {
565                 priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF;
566                 priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF;
567         }
568         priv->num_grps++;
569
570         return 0;
571 }
572
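/* Allocate the net_device and parse the gianfar device-tree node: queue
 * counts, register groups, per-queue structures, stashing properties, the MAC
 * address, model-specific capability flags, and the PHY/TBI handles. */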
573 static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
574 {
575         const char *model;
576         const char *ctype;
577         const void *mac_addr;
578         int err = 0, i;
579         struct net_device *dev = NULL;
580         struct gfar_private *priv = NULL;
581         struct device_node *np = ofdev->node;
582         struct device_node *child = NULL;
583         const u32 *stash;
584         const u32 *stash_len;
585         const u32 *stash_idx;
586         unsigned int num_tx_qs, num_rx_qs;
587         u32 *tx_queues, *rx_queues;
588
589         if (!np || !of_device_is_available(np))
590                 return -ENODEV;
591
592         /* parse the num of tx and rx queues */
593         tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
594         num_tx_qs = tx_queues ? *tx_queues : 1;
595
596         if (num_tx_qs > MAX_TX_QS) {
597                 printk(KERN_ERR "num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
598                                 num_tx_qs, MAX_TX_QS);
599                 printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
600                 return -EINVAL;
601         }
602
603         rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
604         num_rx_qs = rx_queues ? *rx_queues : 1;
605
606         if (num_rx_qs > MAX_RX_QS) {
607                 printk(KERN_ERR "num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
608                                 num_rx_qs, MAX_RX_QS);
609                 printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
610                 return -EINVAL;
611         }
612
613         *pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
614         dev = *pdev;
615         if (NULL == dev)
616                 return -ENOMEM;
617
618         priv = netdev_priv(dev);
619         priv->node = ofdev->node;
620         priv->ndev = dev;
621
622         dev->num_tx_queues = num_tx_qs;
623         dev->real_num_tx_queues = num_tx_qs;
624         priv->num_tx_queues = num_tx_qs;
625         priv->num_rx_queues = num_rx_qs;
626         priv->num_grps = 0x0;
627
628         model = of_get_property(np, "model", NULL);
629
630         for (i = 0; i < MAXGROUPS; i++)
631                 priv->gfargrp[i].regs = NULL;
632
633         /* Parse and initialize group specific information */
634         if (of_device_is_compatible(np, "fsl,etsec2")) {
635                 priv->mode = MQ_MG_MODE;
636                 for_each_child_of_node(np, child) {
637                         err = gfar_parse_group(child, priv, model);
638                         if (err)
639                                 goto err_grp_init;
640                 }
641         } else {
642                 priv->mode = SQ_SG_MODE;
643                 err = gfar_parse_group(np, priv, model);
644                 if (err)
645                         goto err_grp_init;
646         }
647
648         for (i = 0; i < priv->num_tx_queues; i++)
649                priv->tx_queue[i] = NULL;
650         for (i = 0; i < priv->num_rx_queues; i++)
651                 priv->rx_queue[i] = NULL;
652
653         for (i = 0; i < priv->num_tx_queues; i++) {
654                 priv->tx_queue[i] =  (struct gfar_priv_tx_q *)kmalloc(
655                                 sizeof (struct gfar_priv_tx_q), GFP_KERNEL);
656                 if (!priv->tx_queue[i]) {
657                         err = -ENOMEM;
658                         goto tx_alloc_failed;
659                 }
660                 priv->tx_queue[i]->tx_skbuff = NULL;
661                 priv->tx_queue[i]->qindex = i;
662                 priv->tx_queue[i]->dev = dev;
663                 spin_lock_init(&(priv->tx_queue[i]->txlock));
664         }
665
666         for (i = 0; i < priv->num_rx_queues; i++) {
667                 priv->rx_queue[i] = (struct gfar_priv_rx_q *)kmalloc(
668                                         sizeof (struct gfar_priv_rx_q), GFP_KERNEL);
669                 if (!priv->rx_queue[i]) {
670                         err = -ENOMEM;
671                         goto rx_alloc_failed;
672                 }
673                 priv->rx_queue[i]->rx_skbuff = NULL;
674                 priv->rx_queue[i]->qindex = i;
675                 priv->rx_queue[i]->dev = dev;
676                 spin_lock_init(&(priv->rx_queue[i]->rxlock));
677         }
678
679
680         stash = of_get_property(np, "bd-stash", NULL);
681
682         if (stash) {
683                 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
684                 priv->bd_stash_en = 1;
685         }
686
687         stash_len = of_get_property(np, "rx-stash-len", NULL);
688
689         if (stash_len)
690                 priv->rx_stash_size = *stash_len;
691
692         stash_idx = of_get_property(np, "rx-stash-idx", NULL);
693
694         if (stash_idx)
695                 priv->rx_stash_index = *stash_idx;
696
697         if (stash_len || stash_idx)
698                 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
699
700         mac_addr = of_get_mac_address(np);
701         if (mac_addr)
702                 memcpy(dev->dev_addr, mac_addr, MAC_ADDR_LEN);
703
704         if (model && !strcasecmp(model, "TSEC"))
705                 priv->device_flags =
706                         FSL_GIANFAR_DEV_HAS_GIGABIT |
707                         FSL_GIANFAR_DEV_HAS_COALESCE |
708                         FSL_GIANFAR_DEV_HAS_RMON |
709                         FSL_GIANFAR_DEV_HAS_MULTI_INTR;
710         if (model && !strcasecmp(model, "eTSEC"))
711                 priv->device_flags =
712                         FSL_GIANFAR_DEV_HAS_GIGABIT |
713                         FSL_GIANFAR_DEV_HAS_COALESCE |
714                         FSL_GIANFAR_DEV_HAS_RMON |
715                         FSL_GIANFAR_DEV_HAS_MULTI_INTR |
716                         FSL_GIANFAR_DEV_HAS_PADDING |
717                         FSL_GIANFAR_DEV_HAS_CSUM |
718                         FSL_GIANFAR_DEV_HAS_VLAN |
719                         FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
720                         FSL_GIANFAR_DEV_HAS_EXTENDED_HASH;
721
722         ctype = of_get_property(np, "phy-connection-type", NULL);
723
724         /* We only care about rgmii-id.  The rest are autodetected */
725         if (ctype && !strcmp(ctype, "rgmii-id"))
726                 priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
727         else
728                 priv->interface = PHY_INTERFACE_MODE_MII;
729
730         if (of_get_property(np, "fsl,magic-packet", NULL))
731                 priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
732
733         priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
734
735         /* Find the TBI PHY.  If it's not there, we don't support SGMII */
736         priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
737
738         return 0;
739
740 rx_alloc_failed:
741         free_rx_pointers(priv);
742 tx_alloc_failed:
743         free_tx_pointers(priv);
744 err_grp_init:
745         unmap_group_regs(priv);
746         free_netdev(dev);
747         return err;
748 }
749
750 /* Ioctl MII Interface */
751 static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
752 {
753         struct gfar_private *priv = netdev_priv(dev);
754
755         if (!netif_running(dev))
756                 return -EINVAL;
757
758         if (!priv->phydev)
759                 return -ENODEV;
760
761         return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
762 }
763
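/* Mirror the low max_qs bits of bit_map, so that the hardware's MSB-first
 * queue bit maps line up with for_each_bit(), which scans from the LSB. */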
764 static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
765 {
766         unsigned int new_bit_map = 0x0;
767         int mask = 0x1 << (max_qs - 1), i;
768         for (i = 0; i < max_qs; i++) {
769                 if (bit_map & mask)
770                         new_bit_map = new_bit_map + (1 << i);
771                 mask = mask >> 0x1;
772         }
773         return new_bit_map;
774 }
775
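/* Write a cluster of filer entries matching one Rx parser class, working
 * downward from index rqfar; returns the index of the last entry written so
 * callers can chain further clusters below it. */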
776 static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
777                                    u32 class)
778 {
779         u32 rqfpr = FPR_FILER_MASK;
780         u32 rqfcr = 0x0;
781
782         rqfar--;
783         rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
784         ftp_rqfpr[rqfar] = rqfpr;
785         ftp_rqfcr[rqfar] = rqfcr;
786         gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
787
788         rqfar--;
789         rqfcr = RQFCR_CMP_NOMATCH;
790         ftp_rqfpr[rqfar] = rqfpr;
791         ftp_rqfcr[rqfar] = rqfcr;
792         gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
793
794         rqfar--;
795         rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
796         rqfpr = class;
797         ftp_rqfcr[rqfar] = rqfcr;
798         ftp_rqfpr[rqfar] = rqfpr;
799         gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
800
801         rqfar--;
802         rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
803         rqfpr = class;
804         ftp_rqfcr[rqfar] = rqfcr;
805         ftp_rqfpr[rqfar] = rqfpr;
806         gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
807
808         return rqfar;
809 }
810
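/* Program the default Rx filer table: a catch-all match rule at the top, one
 * cluster per recognized IPv4/IPv6 TCP/UDP class below it, and no-match
 * entries for all remaining indices.  cur_filer_idx records the first
 * non-masked rule. */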
811 static void gfar_init_filer_table(struct gfar_private *priv)
812 {
813         int i = 0x0;
814         u32 rqfar = MAX_FILER_IDX;
815         u32 rqfcr = 0x0;
816         u32 rqfpr = FPR_FILER_MASK;
817
818         /* Default rule */
819         rqfcr = RQFCR_CMP_MATCH;
820         ftp_rqfcr[rqfar] = rqfcr;
821         ftp_rqfpr[rqfar] = rqfpr;
822         gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
823
824         rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
825         rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
826         rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
827         rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
828         rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
829         rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);
830
831         /* cur_filer_idx indicates the first non-masked rule */
832         priv->cur_filer_idx = rqfar;
833
834         /* Rest are masked rules */
835         rqfcr = RQFCR_CMP_NOMATCH;
836         for (i = 0; i < rqfar; i++) {
837                 ftp_rqfcr[i] = rqfcr;
838                 ftp_rqfpr[i] = rqfpr;
839                 gfar_write_filer(priv, i, rqfcr, rqfpr);
840         }
841 }
842
843 /* Set up the ethernet device structure, private data,
844  * and anything else we need before we start */
845 static int gfar_probe(struct of_device *ofdev,
846                 const struct of_device_id *match)
847 {
848         u32 tempval;
849         struct net_device *dev = NULL;
850         struct gfar_private *priv = NULL;
851         struct gfar __iomem *regs = NULL;
852         int err = 0, i, grp_idx = 0;
853         int len_devname;
854         u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
855         u32 isrg = 0;
856         u32 __iomem *baddr;
857
858         err = gfar_of_init(ofdev, &dev);
859
860         if (err)
861                 return err;
862
863         priv = netdev_priv(dev);
864         priv->ndev = dev;
865         priv->ofdev = ofdev;
866         priv->node = ofdev->node;
867         SET_NETDEV_DEV(dev, &ofdev->dev);
868
869         spin_lock_init(&priv->bflock);
870         INIT_WORK(&priv->reset_task, gfar_reset_task);
871
872         dev_set_drvdata(&ofdev->dev, priv);
873         regs = priv->gfargrp[0].regs;
874
875         /* Stop the DMA engine now, in case it was running before */
876         /* (The firmware could have used it, and left it running). */
877         gfar_halt(dev);
878
879         /* Reset MAC layer */
880         gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
881
882         /* We need to delay at least 3 TX clocks */
883         udelay(2);
884
885         tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
886         gfar_write(&regs->maccfg1, tempval);
887
888         /* Initialize MACCFG2. */
889         gfar_write(&regs->maccfg2, MACCFG2_INIT_SETTINGS);
890
891         /* Initialize ECNTRL */
892         gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
893
894         /* Set the dev->base_addr to the gfar reg region */
895         dev->base_addr = (unsigned long) regs;
896
897         SET_NETDEV_DEV(dev, &ofdev->dev);
898
899         /* Fill in the dev structure */
900         dev->watchdog_timeo = TX_TIMEOUT;
901         dev->mtu = 1500;
902         dev->netdev_ops = &gfar_netdev_ops;
903         dev->ethtool_ops = &gfar_ethtool_ops;
904
905         /* Register for NAPI: we register one NAPI instance per group */
906         for (i = 0; i < priv->num_grps; i++)
907                 netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, GFAR_DEV_WEIGHT);
908
909         if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
910                 priv->rx_csum_enable = 1;
911                 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA;
912         } else
913                 priv->rx_csum_enable = 0;
914
915         priv->vlgrp = NULL;
916
917         if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN)
918                 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
919
920         if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
921                 priv->extended_hash = 1;
922                 priv->hash_width = 9;
923
924                 priv->hash_regs[0] = &regs->igaddr0;
925                 priv->hash_regs[1] = &regs->igaddr1;
926                 priv->hash_regs[2] = &regs->igaddr2;
927                 priv->hash_regs[3] = &regs->igaddr3;
928                 priv->hash_regs[4] = &regs->igaddr4;
929                 priv->hash_regs[5] = &regs->igaddr5;
930                 priv->hash_regs[6] = &regs->igaddr6;
931                 priv->hash_regs[7] = &regs->igaddr7;
932                 priv->hash_regs[8] = &regs->gaddr0;
933                 priv->hash_regs[9] = &regs->gaddr1;
934                 priv->hash_regs[10] = &regs->gaddr2;
935                 priv->hash_regs[11] = &regs->gaddr3;
936                 priv->hash_regs[12] = &regs->gaddr4;
937                 priv->hash_regs[13] = &regs->gaddr5;
938                 priv->hash_regs[14] = &regs->gaddr6;
939                 priv->hash_regs[15] = &regs->gaddr7;
940
941         } else {
942                 priv->extended_hash = 0;
943                 priv->hash_width = 8;
944
945                 priv->hash_regs[0] = &regs->gaddr0;
946                 priv->hash_regs[1] = &regs->gaddr1;
947                 priv->hash_regs[2] = &regs->gaddr2;
948                 priv->hash_regs[3] = &regs->gaddr3;
949                 priv->hash_regs[4] = &regs->gaddr4;
950                 priv->hash_regs[5] = &regs->gaddr5;
951                 priv->hash_regs[6] = &regs->gaddr6;
952                 priv->hash_regs[7] = &regs->gaddr7;
953         }
954
955         if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
956                 priv->padding = DEFAULT_PADDING;
957         else
958                 priv->padding = 0;
959
960         if (dev->features & NETIF_F_IP_CSUM)
961                 dev->hard_header_len += GMAC_FCB_LEN;
962
963         /* Program the isrg regs only if number of grps > 1 */
964         if (priv->num_grps > 1) {
965                 baddr = &regs->isrg0;
966                 for (i = 0; i < priv->num_grps; i++) {
967                         isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX);
968                         isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX);
969                         gfar_write(baddr, isrg);
970                         baddr++;
971                         isrg = 0x0;
972                 }
973         }
974
975         /* Need to reverse the bit maps as bit_map's MSB is q0,
976          * but for_each_bit parses from right to left, which
977          * basically reverses the queue numbers */
978         for (i = 0; i < priv->num_grps; i++) {
979                 priv->gfargrp[i].tx_bit_map = reverse_bitmap(
980                                 priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
981                 priv->gfargrp[i].rx_bit_map = reverse_bitmap(
982                                 priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
983         }
984
985         /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
986          * also assign queues to groups */
987         for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
988                 priv->gfargrp[grp_idx].num_rx_queues = 0x0;
989                 for_each_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
990                                 priv->num_rx_queues) {
991                         priv->gfargrp[grp_idx].num_rx_queues++;
992                         priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
993                         rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
994                         rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
995                 }
996                 priv->gfargrp[grp_idx].num_tx_queues = 0x0;
997                 for_each_bit (i, &priv->gfargrp[grp_idx].tx_bit_map,
998                                 priv->num_tx_queues) {
999                         priv->gfargrp[grp_idx].num_tx_queues++;
1000                         priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
1001                         tstat = tstat | (TSTAT_CLEAR_THALT >> i);
1002                         tqueue = tqueue | (TQUEUE_EN0 >> i);
1003                 }
1004                 priv->gfargrp[grp_idx].rstat = rstat;
1005                 priv->gfargrp[grp_idx].tstat = tstat;
1006                 rstat = tstat = 0;
1007         }
1008
1009         gfar_write(&regs->rqueue, rqueue);
1010         gfar_write(&regs->tqueue, tqueue);
1011
1012         priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
1013
1014         /* Initializing some of the rx/tx queue level parameters */
1015         for (i = 0; i < priv->num_tx_queues; i++) {
1016                 priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
1017                 priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
1018                 priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
1019                 priv->tx_queue[i]->txic = DEFAULT_TXIC;
1020         }
1021
1022         for (i = 0; i < priv->num_rx_queues; i++) {
1023                 priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
1024                 priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
1025                 priv->rx_queue[i]->rxic = DEFAULT_RXIC;
1026         }
1027
1028         /* Enable the filer if using multiple RX queues */
1029         if (priv->num_rx_queues > 1)
1030                 priv->rx_filer_enable = 1;
1031         /* Enable most messages by default */
1032         priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
1033
1034         /* Carrier starts down, phylib will bring it up */
1035         netif_carrier_off(dev);
1036
1037         err = register_netdev(dev);
1038
1039         if (err) {
1040                 printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
1041                                 dev->name);
1042                 goto register_fail;
1043         }
1044
1045         device_init_wakeup(&dev->dev,
1046                 priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1047
1048         /* fill out IRQ number and name fields */
1049         len_devname = strlen(dev->name);
1050         for (i = 0; i < priv->num_grps; i++) {
1051                 strncpy(&priv->gfargrp[i].int_name_tx[0], dev->name,
1052                                 len_devname);
1053                 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1054                         strncpy(&priv->gfargrp[i].int_name_tx[len_devname],
1055                                 "_g", sizeof("_g"));
1056                         priv->gfargrp[i].int_name_tx[
1057                                 strlen(priv->gfargrp[i].int_name_tx)] = i+48;
1058                         strncpy(&priv->gfargrp[i].int_name_tx[strlen(
1059                                 priv->gfargrp[i].int_name_tx)],
1060                                 "_tx", sizeof("_tx") + 1);
1061
1062                         strncpy(&priv->gfargrp[i].int_name_rx[0], dev->name,
1063                                         len_devname);
1064                         strncpy(&priv->gfargrp[i].int_name_rx[len_devname],
1065                                         "_g", sizeof("_g"));
1066                         priv->gfargrp[i].int_name_rx[
1067                                 strlen(priv->gfargrp[i].int_name_rx)] = i+48;
1068                         strncpy(&priv->gfargrp[i].int_name_rx[strlen(
1069                                 priv->gfargrp[i].int_name_rx)],
1070                                 "_rx", sizeof("_rx") + 1);
1071
1072                         strncpy(&priv->gfargrp[i].int_name_er[0], dev->name,
1073                                         len_devname);
1074                         strncpy(&priv->gfargrp[i].int_name_er[len_devname],
1075                                 "_g", sizeof("_g"));
1076                         priv->gfargrp[i].int_name_er[strlen(
1077                                         priv->gfargrp[i].int_name_er)] = i+48;
1078                         strncpy(&priv->gfargrp[i].int_name_er[strlen(\
1079                                 priv->gfargrp[i].int_name_er)],
1080                                 "_er", sizeof("_er") + 1);
1081                 } else
1082                         priv->gfargrp[i].int_name_tx[len_devname] = '\0';
1083         }
1084
1085         /* Initialize the filer table */
1086         gfar_init_filer_table(priv);
1087
1088         /* Create all the sysfs files */
1089         gfar_init_sysfs(dev);
1090
1091         /* Print out the device info */
1092         printk(KERN_INFO DEVICE_NAME "%pM\n", dev->name, dev->dev_addr);
1093
1094         /* Even more device info helps when determining which kernel */
1095         /* provided which set of benchmarks. */
1096         printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
1097         for (i = 0; i < priv->num_rx_queues; i++)
1098                 printk(KERN_INFO "%s: RX BD ring size for Q[%d]: %d\n",
1099                         dev->name, i, priv->rx_queue[i]->rx_ring_size);
1100         for (i = 0; i < priv->num_tx_queues; i++)
1101                 printk(KERN_INFO "%s: TX BD ring size for Q[%d]: %d\n",
1102                         dev->name, i, priv->tx_queue[i]->tx_ring_size);
1103
1104         return 0;
1105
1106 register_fail:
1107         unmap_group_regs(priv);
1108         free_tx_pointers(priv);
1109         free_rx_pointers(priv);
1110         if (priv->phy_node)
1111                 of_node_put(priv->phy_node);
1112         if (priv->tbi_node)
1113                 of_node_put(priv->tbi_node);
1114         free_netdev(dev);
1115         return err;
1116 }
1117
1118 static int gfar_remove(struct of_device *ofdev)
1119 {
1120         struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);
1121
1122         if (priv->phy_node)
1123                 of_node_put(priv->phy_node);
1124         if (priv->tbi_node)
1125                 of_node_put(priv->tbi_node);
1126
1127         dev_set_drvdata(&ofdev->dev, NULL);
1128
1129         unregister_netdev(priv->ndev);
1130         unmap_group_regs(priv);
1131         free_netdev(priv->ndev);
1132
1133         return 0;
1134 }
1135
1136 #ifdef CONFIG_PM
1137
1138 static int gfar_suspend(struct device *dev)
1139 {
1140         struct gfar_private *priv = dev_get_drvdata(dev);
1141         struct net_device *ndev = priv->ndev;
1142         struct gfar __iomem *regs = priv->gfargrp[0].regs;
1143         unsigned long flags;
1144         u32 tempval;
1145
1146         int magic_packet = priv->wol_en &&
1147                 (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1148
1149         netif_device_detach(ndev);
1150
1151         if (netif_running(ndev)) {
1152
1153                 local_irq_save(flags);
1154                 lock_tx_qs(priv);
1155                 lock_rx_qs(priv);
1156
1157                 gfar_halt_nodisable(ndev);
1158
1159                 /* Disable Tx, and Rx if wake-on-LAN is disabled. */
1160                 tempval = gfar_read(&regs->maccfg1);
1161
1162                 tempval &= ~MACCFG1_TX_EN;
1163
1164                 if (!magic_packet)
1165                         tempval &= ~MACCFG1_RX_EN;
1166
1167                 gfar_write(&regs->maccfg1, tempval);
1168
1169                 unlock_rx_qs(priv);
1170                 unlock_tx_qs(priv);
1171                 local_irq_restore(flags);
1172
1173                 disable_napi(priv);
1174
1175                 if (magic_packet) {
1176                         /* Enable interrupt on Magic Packet */
1177                         gfar_write(&regs->imask, IMASK_MAG);
1178
1179                         /* Enable Magic Packet mode */
1180                         tempval = gfar_read(&regs->maccfg2);
1181                         tempval |= MACCFG2_MPEN;
1182                         gfar_write(&regs->maccfg2, tempval);
1183                 } else {
1184                         phy_stop(priv->phydev);
1185                 }
1186         }
1187
1188         return 0;
1189 }
1190
1191 static int gfar_resume(struct device *dev)
1192 {
1193         struct gfar_private *priv = dev_get_drvdata(dev);
1194         struct net_device *ndev = priv->ndev;
1195         struct gfar __iomem *regs = priv->gfargrp[0].regs;
1196         unsigned long flags;
1197         u32 tempval;
1198         int magic_packet = priv->wol_en &&
1199                 (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1200
1201         if (!netif_running(ndev)) {
1202                 netif_device_attach(ndev);
1203                 return 0;
1204         }
1205
1206         if (!magic_packet && priv->phydev)
1207                 phy_start(priv->phydev);
1208
1209         /* Disable Magic Packet mode, in case something
1210          * else woke us up.
1211          */
1212         local_irq_save(flags);
1213         lock_tx_qs(priv);
1214         lock_rx_qs(priv);
1215
1216         tempval = gfar_read(&regs->maccfg2);
1217         tempval &= ~MACCFG2_MPEN;
1218         gfar_write(&regs->maccfg2, tempval);
1219
1220         gfar_start(ndev);
1221
1222         unlock_rx_qs(priv);
1223         unlock_tx_qs(priv);
1224         local_irq_restore(flags);
1225
1226         netif_device_attach(ndev);
1227
1228         enable_napi(priv);
1229
1230         return 0;
1231 }
1232
1233 static int gfar_restore(struct device *dev)
1234 {
1235         struct gfar_private *priv = dev_get_drvdata(dev);
1236         struct net_device *ndev = priv->ndev;
1237
1238         if (!netif_running(ndev))
1239                 return 0;
1240
1241         gfar_init_bds(ndev);
1242         init_registers(ndev);
1243         gfar_set_mac_address(ndev);
1244         gfar_init_mac(ndev);
1245         gfar_start(ndev);
1246
1247         priv->oldlink = 0;
1248         priv->oldspeed = 0;
1249         priv->oldduplex = -1;
1250
1251         if (priv->phydev)
1252                 phy_start(priv->phydev);
1253
1254         netif_device_attach(ndev);
1255         enable_napi(priv);
1256
1257         return 0;
1258 }
1259
1260 static struct dev_pm_ops gfar_pm_ops = {
1261         .suspend = gfar_suspend,
1262         .resume = gfar_resume,
1263         .freeze = gfar_suspend,
1264         .thaw = gfar_resume,
1265         .restore = gfar_restore,
1266 };
1267
1268 #define GFAR_PM_OPS (&gfar_pm_ops)
1269
1270 static int gfar_legacy_suspend(struct of_device *ofdev, pm_message_t state)
1271 {
1272         return gfar_suspend(&ofdev->dev);
1273 }
1274
1275 static int gfar_legacy_resume(struct of_device *ofdev)
1276 {
1277         return gfar_resume(&ofdev->dev);
1278 }
1279
1280 #else
1281
1282 #define GFAR_PM_OPS NULL
1283 #define gfar_legacy_suspend NULL
1284 #define gfar_legacy_resume NULL
1285
1286 #endif
1287
1288 /* Reads the controller's registers to determine what interface
1289  * connects it to the PHY.
1290  */
1291 static phy_interface_t gfar_get_interface(struct net_device *dev)
1292 {
1293         struct gfar_private *priv = netdev_priv(dev);
1294         struct gfar __iomem *regs = priv->gfargrp[0].regs;
1295         u32 ecntrl;
1296
1297         ecntrl = gfar_read(&regs->ecntrl);
1298
1299         if (ecntrl & ECNTRL_SGMII_MODE)
1300                 return PHY_INTERFACE_MODE_SGMII;
1301
1302         if (ecntrl & ECNTRL_TBI_MODE) {
1303                 if (ecntrl & ECNTRL_REDUCED_MODE)
1304                         return PHY_INTERFACE_MODE_RTBI;
1305                 else
1306                         return PHY_INTERFACE_MODE_TBI;
1307         }
1308
1309         if (ecntrl & ECNTRL_REDUCED_MODE) {
1310                 if (ecntrl & ECNTRL_REDUCED_MII_MODE)
1311                         return PHY_INTERFACE_MODE_RMII;
1312                 else {
1313                         phy_interface_t interface = priv->interface;
1314
1315                         /*
1316                          * This isn't autodetected right now, so it must
1317                          * be set by the device tree or platform code.
1318                          */
1319                         if (interface == PHY_INTERFACE_MODE_RGMII_ID)
1320                                 return PHY_INTERFACE_MODE_RGMII_ID;
1321
1322                         return PHY_INTERFACE_MODE_RGMII;
1323                 }
1324         }
1325
1326         if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
1327                 return PHY_INTERFACE_MODE_GMII;
1328
1329         return PHY_INTERFACE_MODE_MII;
1330 }
1331
1332
1333 /* Initializes driver's PHY state, and attaches to the PHY.
1334  * Returns 0 on success.
1335  */
1336 static int init_phy(struct net_device *dev)
1337 {
1338         struct gfar_private *priv = netdev_priv(dev);
1339         uint gigabit_support =
1340                 priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
1341                 SUPPORTED_1000baseT_Full : 0;
1342         phy_interface_t interface;
1343
1344         priv->oldlink = 0;
1345         priv->oldspeed = 0;
1346         priv->oldduplex = -1;
1347
1348         interface = gfar_get_interface(dev);
1349
1350         priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
1351                                       interface);
1352         if (!priv->phydev)
1353                 priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
1354                                                          interface);
1355         if (!priv->phydev) {
1356                 dev_err(&dev->dev, "could not attach to PHY\n");
1357                 return -ENODEV;
1358         }
1359
1360         if (interface == PHY_INTERFACE_MODE_SGMII)
1361                 gfar_configure_serdes(dev);
1362
1363         /* Remove any features not supported by the controller */
1364         priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
1365         priv->phydev->advertising = priv->phydev->supported;
1366
1367         return 0;
1368 }
1369
1370 /*
1371  * Initialize TBI PHY interface for communicating with the
1372  * SERDES lynx PHY on the chip.  We communicate with this PHY
1373  * through the MDIO bus on each controller, treating it as a
1374  * "normal" PHY at the address found in the TBIPA register.  We assume
1375  * that the TBIPA register is valid.  Either the MDIO bus code will set
1376  * it to a value that doesn't conflict with other PHYs on the bus, or the
1377  * value doesn't matter, as there are no other PHYs on the bus.
1378  */
1379 static void gfar_configure_serdes(struct net_device *dev)
1380 {
1381         struct gfar_private *priv = netdev_priv(dev);
1382         struct phy_device *tbiphy;
1383
1384         if (!priv->tbi_node) {
1385                 dev_warn(&dev->dev, "error: SGMII mode requires that the "
1386                                     "device tree specify a tbi-handle\n");
1387                 return;
1388         }
1389
1390         tbiphy = of_phy_find_device(priv->tbi_node);
1391         if (!tbiphy) {
1392                 dev_err(&dev->dev, "error: Could not get TBI device\n");
1393                 return;
1394         }
1395
1396         /*
1397          * If the link is already up, we must already be ok, and don't need to
1398          * configure and reset the TBI<->SerDes link.  Maybe U-Boot configured
1399          * everything for us?  Resetting it takes the link down and requires
1400          * several seconds for it to come back.
1401          */
1402         if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
1403                 return;
1404
1405         /* Single clock mode, MII mode off (for SerDes communication) */
1406         phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
1407
1408         phy_write(tbiphy, MII_ADVERTISE,
1409                         ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
1410                         ADVERTISE_1000XPSE_ASYM);
1411
1412         phy_write(tbiphy, MII_BMCR, BMCR_ANENABLE |
1413                         BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
1414 }
1415
1416 static void init_registers(struct net_device *dev)
1417 {
1418         struct gfar_private *priv = netdev_priv(dev);
1419         struct gfar __iomem *regs = NULL;
1420         int i = 0;
1421
1422         for (i = 0; i < priv->num_grps; i++) {
1423                 regs = priv->gfargrp[i].regs;
1424                 /* Clear IEVENT */
1425                 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
1426
1427                 /* Initialize IMASK */
1428                 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
1429         }
1430
1431         regs = priv->gfargrp[0].regs;
1432         /* Init hash registers to zero */
1433         gfar_write(&regs->igaddr0, 0);
1434         gfar_write(&regs->igaddr1, 0);
1435         gfar_write(&regs->igaddr2, 0);
1436         gfar_write(&regs->igaddr3, 0);
1437         gfar_write(&regs->igaddr4, 0);
1438         gfar_write(&regs->igaddr5, 0);
1439         gfar_write(&regs->igaddr6, 0);
1440         gfar_write(&regs->igaddr7, 0);
1441
1442         gfar_write(&regs->gaddr0, 0);
1443         gfar_write(&regs->gaddr1, 0);
1444         gfar_write(&regs->gaddr2, 0);
1445         gfar_write(&regs->gaddr3, 0);
1446         gfar_write(&regs->gaddr4, 0);
1447         gfar_write(&regs->gaddr5, 0);
1448         gfar_write(&regs->gaddr6, 0);
1449         gfar_write(&regs->gaddr7, 0);
1450
1451         /* Zero out the RMON MIB registers if the device has them */
1452         if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
1453                 memset_io(&(regs->rmon), 0, sizeof (struct rmon_mib));
1454
1455                 /* Mask off the CAM interrupts */
1456                 gfar_write(&regs->rmon.cam1, 0xffffffff);
1457                 gfar_write(&regs->rmon.cam2, 0xffffffff);
1458         }
1459
1460         /* Initialize the max receive buffer length */
1461         gfar_write(&regs->mrblr, priv->rx_buffer_size);
1462
1463         /* Initialize the Minimum Frame Length Register */
1464         gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
1465 }
1466
1467
1468 /* Halt the receive and transmit queues */
1469 static void gfar_halt_nodisable(struct net_device *dev)
1470 {
1471         struct gfar_private *priv = netdev_priv(dev);
1472         struct gfar __iomem *regs = NULL;
1473         u32 tempval;
1474         int i = 0;
1475
1476         for (i = 0; i < priv->num_grps; i++) {
1477                 regs = priv->gfargrp[i].regs;
1478                 /* Mask all interrupts */
1479                 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
1480
1481                 /* Clear all interrupts */
1482                 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
1483         }
1484
1485         regs = priv->gfargrp[0].regs;
1486         /* Stop the DMA, and wait for it to stop */
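        /* Setting the GRS/GTS bits in DMACTRL asks the controller for a
         * graceful receive/transmit stop; it reports completion through the
         * GRSC/GTSC bits in IEVENT, which we poll for below. */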
1487         tempval = gfar_read(&regs->dmactrl);
1488         if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
1489             != (DMACTRL_GRS | DMACTRL_GTS)) {
1490                 tempval |= (DMACTRL_GRS | DMACTRL_GTS);
1491                 gfar_write(&regs->dmactrl, tempval);
1492
1493                 while (!(gfar_read(&regs->ievent) &
1494                          (IEVENT_GRSC | IEVENT_GTSC)))
1495                         cpu_relax();
1496         }
1497 }
1498
1499 /* Halt the receive and transmit queues */
1500 void gfar_halt(struct net_device *dev)
1501 {
1502         struct gfar_private *priv = netdev_priv(dev);
1503         struct gfar __iomem *regs = priv->gfargrp[0].regs;
1504         u32 tempval;
1505
1506         gfar_halt_nodisable(dev);
1507
1508         /* Disable Rx and Tx */
1509         tempval = gfar_read(&regs->maccfg1);
1510         tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
1511         gfar_write(&regs->maccfg1, tempval);
1512 }
1513
1514 static void free_grp_irqs(struct gfar_priv_grp *grp)
1515 {
1516         free_irq(grp->interruptError, grp);
1517         free_irq(grp->interruptTransmit, grp);
1518         free_irq(grp->interruptReceive, grp);
1519 }
1520
1521 void stop_gfar(struct net_device *dev)
1522 {
1523         struct gfar_private *priv = netdev_priv(dev);
1524         unsigned long flags;
1525         int i;
1526
1527         phy_stop(priv->phydev);
1528
1529
1530         /* Lock it down */
1531         local_irq_save(flags);
1532         lock_tx_qs(priv);
1533         lock_rx_qs(priv);
1534
1535         gfar_halt(dev);
1536
1537         unlock_rx_qs(priv);
1538         unlock_tx_qs(priv);
1539         local_irq_restore(flags);
1540
1541         /* Free the IRQs */
1542         if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1543                 for (i = 0; i < priv->num_grps; i++)
1544                         free_grp_irqs(&priv->gfargrp[i]);
1545         } else {
1546                 for (i = 0; i < priv->num_grps; i++)
1547                         free_irq(priv->gfargrp[i].interruptTransmit,
1548                                         &priv->gfargrp[i]);
1549         }
1550
1551         free_skb_resources(priv);
1552 }
1553
1554 static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
1555 {
1556         struct txbd8 *txbdp;
1557         struct gfar_private *priv = netdev_priv(tx_queue->dev);
1558         int i, j;
1559
1560         txbdp = tx_queue->tx_bd_base;
1561
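        /* Walk the whole ring: for each pending skb, the first BD maps the
         * linear part of the skb (unmapped with dma_unmap_single) and each
         * following BD maps one page fragment (unmapped with dma_unmap_page). */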
1562         for (i = 0; i < tx_queue->tx_ring_size; i++) {
1563                 if (!tx_queue->tx_skbuff[i])
1564                         continue;
1565
1566                 dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
1567                                 txbdp->length, DMA_TO_DEVICE);
1568                 txbdp->lstatus = 0;
1569                 for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
1570                                 j++) {
1571                         txbdp++;
1572                         dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
1573                                         txbdp->length, DMA_TO_DEVICE);
1574                 }
1575                 txbdp++;
1576                 dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
1577                 tx_queue->tx_skbuff[i] = NULL;
1578         }
1579         kfree(tx_queue->tx_skbuff);
1580 }
1581
1582 static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
1583 {
1584         struct rxbd8 *rxbdp;
1585         struct gfar_private *priv = netdev_priv(rx_queue->dev);
1586         int i;
1587
1588         rxbdp = rx_queue->rx_bd_base;
1589
1590         for (i = 0; i < rx_queue->rx_ring_size; i++) {
1591                 if (rx_queue->rx_skbuff[i]) {
1592                         dma_unmap_single(&priv->ofdev->dev,
1593                                         rxbdp->bufPtr, priv->rx_buffer_size,
1594                                         DMA_FROM_DEVICE);
1595                         dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
1596                         rx_queue->rx_skbuff[i] = NULL;
1597                 }
1598                 rxbdp->lstatus = 0;
1599                 rxbdp->bufPtr = 0;
1600                 rxbdp++;
1601         }
1602         kfree(rx_queue->rx_skbuff);
1603 }
1604
1605 /* If there are any tx skbs or rx skbs still around, free them.
1606  * Then free tx_skbuff and rx_skbuff */
1607 static void free_skb_resources(struct gfar_private *priv)
1608 {
1609         struct gfar_priv_tx_q *tx_queue = NULL;
1610         struct gfar_priv_rx_q *rx_queue = NULL;
1611         int i;
1612
1613         /* Go through all the buffer descriptors and free their data buffers */
1614         for (i = 0; i < priv->num_tx_queues; i++) {
1615                 tx_queue = priv->tx_queue[i];
1616                 if (tx_queue->tx_skbuff)
1617                         free_skb_tx_queue(tx_queue);
1618         }
1619
1620         for (i = 0; i < priv->num_rx_queues; i++) {
1621                 rx_queue = priv->rx_queue[i];
1622                 if (rx_queue->rx_skbuff)
1623                         free_skb_rx_queue(rx_queue);
1624         }
1625
1626         dma_free_coherent(&priv->ofdev->dev,
1627                         sizeof(struct txbd8) * priv->total_tx_ring_size +
1628                         sizeof(struct rxbd8) * priv->total_rx_ring_size,
1629                         priv->tx_queue[0]->tx_bd_base,
1630                         priv->tx_queue[0]->tx_bd_dma_base);
1631 }
1632
1633 void gfar_start(struct net_device *dev)
1634 {
1635         struct gfar_private *priv = netdev_priv(dev);
1636         struct gfar __iomem *regs = priv->gfargrp[0].regs;
1637         u32 tempval;
1638         int i = 0;
1639
1640         /* Enable Rx and Tx in MACCFG1 */
1641         tempval = gfar_read(&regs->maccfg1);
1642         tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
1643         gfar_write(&regs->maccfg1, tempval);
1644
1645         /* Initialize DMACTRL to have WWR and WOP */
1646         tempval = gfar_read(&regs->dmactrl);
1647         tempval |= DMACTRL_INIT_SETTINGS;
1648         gfar_write(&regs->dmactrl, tempval);
1649
1650         /* Make sure we aren't stopped */
1651         tempval = gfar_read(&regs->dmactrl);
1652         tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
1653         gfar_write(&regs->dmactrl, tempval);
1654
1655         for (i = 0; i < priv->num_grps; i++) {
1656                 regs = priv->gfargrp[i].regs;
1657                 /* Clear THLT/RHLT, so that the DMA starts polling now */
1658                 gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
1659                 gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
1660                 /* Unmask the interrupts we look for */
1661                 gfar_write(&regs->imask, IMASK_DEFAULT);
1662         }
1663
1664         dev->trans_start = jiffies;
1665 }
1666
1667 void gfar_configure_coalescing(struct gfar_private *priv,
1668         unsigned long tx_mask, unsigned long rx_mask)
1669 {
1670         struct gfar __iomem *regs = priv->gfargrp[0].regs;
1671         u32 __iomem *baddr;
1672         int i = 0;
1673
1674         /* Backward compatible case ---- even if we enable
1675          * multiple queues, there's only single reg to program
1676          */
1677         gfar_write(&regs->txic, 0);
1678         if (likely(priv->tx_queue[0]->txcoalescing))
1679                 gfar_write(&regs->txic, priv->tx_queue[0]->txic);
1680
1681         gfar_write(&regs->rxic, 0);
1682         if (unlikely(priv->rx_queue[0]->rxcoalescing))
1683                 gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
1684
1685         if (priv->mode == MQ_MG_MODE) {
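                /* In multi-queue/multi-group mode every queue has its own
                 * coalescing register (txic0/rxic0 plus the queue index);
                 * program one register per queue named in the masks. */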
1686                 baddr = &regs->txic0;
1687                 for_each_bit (i, &tx_mask, priv->num_tx_queues) {
1688                         if (likely(priv->tx_queue[i]->txcoalescing)) {
1689                                 gfar_write(baddr + i, 0);
1690                                 gfar_write(baddr + i, priv->tx_queue[i]->txic);
1691                         }
1692                 }
1693
1694                 baddr = &regs->rxic0;
1695                 for_each_bit (i, &rx_mask, priv->num_rx_queues) {
1696                         if (likely(priv->rx_queue[i]->rxcoalescing)) {
1697                                 gfar_write(baddr + i, 0);
1698                                 gfar_write(baddr + i, priv->rx_queue[i]->rxic);
1699                         }
1700                 }
1701         }
1702 }
1703
1704 static int register_grp_irqs(struct gfar_priv_grp *grp)
1705 {
1706         struct gfar_private *priv = grp->priv;
1707         struct net_device *dev = priv->ndev;
1708         int err;
1709
1710         /* If the device has multiple interrupts, register for
1711          * them.  Otherwise, only register for the one */
1712         if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1713                 /* Install our interrupt handlers for Error,
1714                  * Transmit, and Receive */
1715                 if ((err = request_irq(grp->interruptError, gfar_error, 0,
1716                                 grp->int_name_er, grp)) < 0) {
1717                         if (netif_msg_intr(priv))
1718                                 printk(KERN_ERR "%s: Can't get IRQ %d\n",
1719                                         dev->name, grp->interruptError);
1720
1721                         goto err_irq_fail;
1722                 }
1723
1724                 if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
1725                                 0, grp->int_name_tx, grp)) < 0) {
1726                         if (netif_msg_intr(priv))
1727                                 printk(KERN_ERR "%s: Can't get IRQ %d\n",
1728                                         dev->name, grp->interruptTransmit);
1729                         goto tx_irq_fail;
1730                 }
1731
1732                 if ((err = request_irq(grp->interruptReceive, gfar_receive, 0,
1733                                 grp->int_name_rx, grp)) < 0) {
1734                         if (netif_msg_intr(priv))
1735                                 printk(KERN_ERR "%s: Can't get IRQ %d\n",
1736                                         dev->name, grp->interruptReceive);
1737                         goto rx_irq_fail;
1738                 }
1739         } else {
1740                 if ((err = request_irq(grp->interruptTransmit, gfar_interrupt, 0,
1741                                 grp->int_name_tx, grp)) < 0) {
1742                         if (netif_msg_intr(priv))
1743                                 printk(KERN_ERR "%s: Can't get IRQ %d\n",
1744                                         dev->name, grp->interruptTransmit);
1745                         goto err_irq_fail;
1746                 }
1747         }
1748
1749         return 0;
1750
1751 rx_irq_fail:
1752         free_irq(grp->interruptTransmit, grp);
1753 tx_irq_fail:
1754         free_irq(grp->interruptError, grp);
1755 err_irq_fail:
1756         return err;
1757
1758 }
1759
1760 /* Bring the controller up and running */
1761 int startup_gfar(struct net_device *ndev)
1762 {
1763         struct gfar_private *priv = netdev_priv(ndev);
1764         struct gfar __iomem *regs = NULL;
1765         int err, i, j;
1766
1767         for (i = 0; i < priv->num_grps; i++) {
1768                 regs = priv->gfargrp[i].regs;
1769                 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
1770         }
1771
1772         regs = priv->gfargrp[0].regs;
1773         err = gfar_alloc_skb_resources(ndev);
1774         if (err)
1775                 return err;
1776
1777         gfar_init_mac(ndev);
1778
1779         for (i = 0; i < priv->num_grps; i++) {
1780                 err = register_grp_irqs(&priv->gfargrp[i]);
1781                 if (err) {
1782                         for (j = 0; j < i; j++)
1783                                 free_grp_irqs(&priv->gfargrp[j]);
1784                         goto irq_fail;
1785                 }
1786         }
1787
1788         /* Start the controller */
1789         gfar_start(ndev);
1790
1791         phy_start(priv->phydev);
1792
1793         gfar_configure_coalescing(priv, 0xFF, 0xFF);
1794
1795         return 0;
1796
1797 irq_fail:
1798         free_skb_resources(priv);
1799         return err;
1800 }
1801
1802 /* Called when something needs to use the ethernet device */
1803 /* Returns 0 for success. */
1804 static int gfar_enet_open(struct net_device *dev)
1805 {
1806         struct gfar_private *priv = netdev_priv(dev);
1807         int err;
1808
1809         enable_napi(priv);
1810
1811         skb_queue_head_init(&priv->rx_recycle);
1812
1813         /* Initialize a bunch of registers */
1814         init_registers(dev);
1815
1816         gfar_set_mac_address(dev);
1817
1818         err = init_phy(dev);
1819
1820         if (err) {
1821                 disable_napi(priv);
1822                 return err;
1823         }
1824
1825         err = startup_gfar(dev);
1826         if (err) {
1827                 disable_napi(priv);
1828                 return err;
1829         }
1830
1831         netif_tx_start_all_queues(dev);
1832
1833         device_set_wakeup_enable(&dev->dev, priv->wol_en);
1834
1835         return err;
1836 }
1837
1838 static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
1839 {
1840         struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);
1841
1842         memset(fcb, 0, GMAC_FCB_LEN);
1843
1844         return fcb;
1845 }
1846
1847 static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
1848 {
1849         u8 flags = 0;
1850
1851         /* If we're here, it's an IP packet with a TCP or UDP
1852          * payload.  We set it to checksum, using a pseudo-header
1853          * we provide
1854          */
1855         flags = TXFCB_DEFAULT;
1856
1857         /* Tell the controller what the protocol is */
1858         /* And provide the already calculated phcs */
1859         if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
1860                 flags |= TXFCB_UDP;
1861                 fcb->phcs = udp_hdr(skb)->check;
1862         } else
1863                 fcb->phcs = tcp_hdr(skb)->check;
1864
1865         /* l3os is the distance between the start of the
1866          * frame (skb->data) and the start of the IP hdr.
1867          * l4os is the distance between the start of the
1868          * l3 hdr and the l4 hdr */
1869         fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN);
1870         fcb->l4os = skb_network_header_len(skb);
1871
1872         fcb->flags = flags;
1873 }
1874
1875 inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
1876 {
1877         fcb->flags |= TXFCB_VLN;
1878         fcb->vlctl = vlan_tx_tag_get(skb);
1879 }
1880
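/* Advance 'stride' descriptors from bdp, wrapping back around to the
 * start of the ring if we run off the end. */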
1881 static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
1882                                struct txbd8 *base, int ring_size)
1883 {
1884         struct txbd8 *new_bd = bdp + stride;
1885
1886         return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
1887 }
1888
1889 static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
1890                 int ring_size)
1891 {
1892         return skip_txbd(bdp, 1, base, ring_size);
1893 }
1894
1895 /* This is called by the kernel when a frame is ready for transmission. */
1896 /* It is pointed to by the dev->hard_start_xmit function pointer */
1897 static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1898 {
1899         struct gfar_private *priv = netdev_priv(dev);
1900         struct gfar_priv_tx_q *tx_queue = NULL;
1901         struct netdev_queue *txq;
1902         struct gfar __iomem *regs = NULL;
1903         struct txfcb *fcb = NULL;
1904         struct txbd8 *txbdp, *txbdp_start, *base;
1905         u32 lstatus;
1906         int i, rq = 0;
1907         u32 bufaddr;
1908         unsigned long flags;
1909         unsigned int nr_frags, length;
1910
1911
1912         rq = skb->queue_mapping;
1913         tx_queue = priv->tx_queue[rq];
1914         txq = netdev_get_tx_queue(dev, rq);
1915         base = tx_queue->tx_bd_base;
1916         regs = tx_queue->grp->regs;
1917
1918         /* make space for additional header when fcb is needed */
1919         if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
1920                         (priv->vlgrp && vlan_tx_tag_present(skb))) &&
1921                         (skb_headroom(skb) < GMAC_FCB_LEN)) {
1922                 struct sk_buff *skb_new;
1923
1924                 skb_new = skb_realloc_headroom(skb, GMAC_FCB_LEN);
1925                 if (!skb_new) {
1926                         dev->stats.tx_errors++;
1927                         kfree_skb(skb);
1928                         return NETDEV_TX_OK;
1929                 }
1930                 kfree_skb(skb);
1931                 skb = skb_new;
1932         }
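
        /* From this point on, any skb that will need an FCB is guaranteed to
         * have at least GMAC_FCB_LEN bytes of headroom for gfar_add_fcb(). */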
1933
1934         /* total number of fragments in the SKB */
1935         nr_frags = skb_shinfo(skb)->nr_frags;
1936
1937         /* check if there is space to queue this packet */
1938         if ((nr_frags+1) > tx_queue->num_txbdfree) {
1939                 /* no space, stop the queue */
1940                 netif_tx_stop_queue(txq);
1941                 dev->stats.tx_fifo_errors++;
1942                 return NETDEV_TX_BUSY;
1943         }
1944
1945         /* Update transmit stats */
1946         dev->stats.tx_bytes += skb->len;
1947
1948         txbdp = txbdp_start = tx_queue->cur_tx;
1949
1950         if (nr_frags == 0) {
1951                 lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
1952         } else {
1953                 /* Place the fragment addresses and lengths into the TxBDs */
1954                 for (i = 0; i < nr_frags; i++) {
1955                         /* Point at the next BD, wrapping as needed */
1956                         txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
1957
1958                         length = skb_shinfo(skb)->frags[i].size;
1959
1960                         lstatus = txbdp->lstatus | length |
1961                                 BD_LFLAG(TXBD_READY);
1962
1963                         /* Handle the last BD specially */
1964                         if (i == nr_frags - 1)
1965                                 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
1966
1967                         bufaddr = dma_map_page(&priv->ofdev->dev,
1968                                         skb_shinfo(skb)->frags[i].page,
1969                                         skb_shinfo(skb)->frags[i].page_offset,
1970                                         length,
1971                                         DMA_TO_DEVICE);
1972
1973                         /* set the TxBD length and buffer pointer */
1974                         txbdp->bufPtr = bufaddr;
1975                         txbdp->lstatus = lstatus;
1976                 }
1977
1978                 lstatus = txbdp_start->lstatus;
1979         }
1980
1981         /* Set up checksumming */
1982         if (CHECKSUM_PARTIAL == skb->ip_summed) {
1983                 fcb = gfar_add_fcb(skb);
1984                 lstatus |= BD_LFLAG(TXBD_TOE);
1985                 gfar_tx_checksum(skb, fcb);
1986         }
1987
1988         if (priv->vlgrp && vlan_tx_tag_present(skb)) {
1989                 if (unlikely(NULL == fcb)) {
1990                         fcb = gfar_add_fcb(skb);
1991                         lstatus |= BD_LFLAG(TXBD_TOE);
1992                 }
1993
1994                 gfar_tx_vlan(skb, fcb);
1995         }
1996
1997         /* setup the TxBD length and buffer pointer for the first BD */
1998         tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
1999         txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
2000                         skb_headlen(skb), DMA_TO_DEVICE);
2001
2002         lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
2003
2004         /*
2005          * We can work in parallel with gfar_clean_tx_ring(), except
2006          * when modifying num_txbdfree. Note that we didn't grab the lock
2007          * when we were reading the num_txbdfree and checking for available
2008          * space, that's because outside of this function it can only grow,
2009          * and once we've got needed space, it cannot suddenly disappear.
2010          *
2011          * The lock also protects us from gfar_error(), which can modify
2012          * regs->tstat and thus retrigger the transfers, which is why we
2013          * also must grab the lock before setting ready bit for the first
2014          * to be transmitted BD.
2015          */
2016         spin_lock_irqsave(&tx_queue->txlock, flags);
2017
2018         /*
2019          * The powerpc-specific eieio() is used, as wmb() has too strong
2020          * semantics (it requires synchronization between cacheable and
2021          * uncacheable mappings, which eieio doesn't provide and which we
2022          * don't need), thus requiring a more expensive sync instruction.  At
2023          * some point, the set of architecture-independent barrier functions
2024          * should be expanded to include weaker barriers.
2025          */
2026         eieio();
2027
2028         txbdp_start->lstatus = lstatus;
2029
2030         /* Update the current skb pointer to the next entry we will use
2031          * (wrapping if necessary) */
2032         tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
2033                 TX_RING_MOD_MASK(tx_queue->tx_ring_size);
2034
2035         tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2036
2037         /* reduce TxBD free count */
2038         tx_queue->num_txbdfree -= (nr_frags + 1);
2039
2040         dev->trans_start = jiffies;
2041
2042         /* If the next BD still needs to be cleaned up, then the bds
2043            are full.  We need to tell the kernel to stop sending us stuff. */
2044         if (!tx_queue->num_txbdfree) {
2045                 netif_tx_stop_queue(txq);
2046
2047                 dev->stats.tx_fifo_errors++;
2048         }
2049
2050         /* Tell the DMA to go go go */
2051         gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
2052
2053         /* Unlock priv */
2054         spin_unlock_irqrestore(&tx_queue->txlock, flags);
2055
2056         return NETDEV_TX_OK;
2057 }
2058
2059 /* Stops the kernel queue, and halts the controller */
2060 static int gfar_close(struct net_device *dev)
2061 {
2062         struct gfar_private *priv = netdev_priv(dev);
2063
2064         disable_napi(priv);
2065
2066         skb_queue_purge(&priv->rx_recycle);
2067         cancel_work_sync(&priv->reset_task);
2068         stop_gfar(dev);
2069
2070         /* Disconnect from the PHY */
2071         phy_disconnect(priv->phydev);
2072         priv->phydev = NULL;
2073
2074         netif_tx_stop_all_queues(dev);
2075
2076         return 0;
2077 }
2078
2079 /* Changes the mac address if the controller is not running. */
2080 static int gfar_set_mac_address(struct net_device *dev)
2081 {
2082         gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
2083
2084         return 0;
2085 }
2086
2087
2088 /* Enables and disables VLAN insertion/extraction */
2089 static void gfar_vlan_rx_register(struct net_device *dev,
2090                 struct vlan_group *grp)
2091 {
2092         struct gfar_private *priv = netdev_priv(dev);
2093         struct gfar __iomem *regs = NULL;
2094         unsigned long flags;
2095         u32 tempval;
2096
2097         regs = priv->gfargrp[0].regs;
2098         local_irq_save(flags);
2099         lock_rx_qs(priv);
2100
2101         priv->vlgrp = grp;
2102
2103         if (grp) {
2104                 /* Enable VLAN tag insertion */
2105                 tempval = gfar_read(&regs->tctrl);
2106                 tempval |= TCTRL_VLINS;
2107
2108                 gfar_write(&regs->tctrl, tempval);
2109
2110                 /* Enable VLAN tag extraction */
2111                 tempval = gfar_read(&regs->rctrl);
2112                 tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
2113                 gfar_write(&regs->rctrl, tempval);
2114         } else {
2115                 /* Disable VLAN tag insertion */
2116                 tempval = gfar_read(&regs->tctrl);
2117                 tempval &= ~TCTRL_VLINS;
2118                 gfar_write(&regs->tctrl, tempval);
2119
2120                 /* Disable VLAN tag extraction */
2121                 tempval = gfar_read(&regs->rctrl);
2122                 tempval &= ~RCTRL_VLEX;
2123                 /* If parse is no longer required, then disable parser */
2124                 if (tempval & RCTRL_REQ_PARSER)
2125                         tempval |= RCTRL_PRSDEP_INIT;
2126                 else
2127                         tempval &= ~RCTRL_PRSDEP_INIT;
2128                 gfar_write(&regs->rctrl, tempval);
2129         }
2130
2131         gfar_change_mtu(dev, dev->mtu);
2132
2133         unlock_rx_qs(priv);
2134         local_irq_restore(flags);
2135 }
2136
2137 static int gfar_change_mtu(struct net_device *dev, int new_mtu)
2138 {
2139         int tempsize, tempval;
2140         struct gfar_private *priv = netdev_priv(dev);
2141         struct gfar __iomem *regs = priv->gfargrp[0].regs;
2142         int oldsize = priv->rx_buffer_size;
2143         int frame_size = new_mtu + ETH_HLEN;
2144
2145         if (priv->vlgrp)
2146                 frame_size += VLAN_HLEN;
2147
2148         if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
2149                 if (netif_msg_drv(priv))
2150                         printk(KERN_ERR "%s: Invalid MTU setting\n",
2151                                         dev->name);
2152                 return -EINVAL;
2153         }
2154
2155         if (gfar_uses_fcb(priv))
2156                 frame_size += GMAC_FCB_LEN;
2157
2158         frame_size += priv->padding;
2159
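        /* Round the required size up to the next INCREMENTAL_BUFFER_SIZE
         * boundary (this always rounds up by at least one byte). */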
2160         tempsize =
2161             (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
2162             INCREMENTAL_BUFFER_SIZE;
2163
2164         /* Only stop and start the controller if it isn't already
2165          * stopped, and we changed something */
2166         if ((oldsize != tempsize) && (dev->flags & IFF_UP))
2167                 stop_gfar(dev);
2168
2169         priv->rx_buffer_size = tempsize;
2170
2171         dev->mtu = new_mtu;
2172
2173         gfar_write(&regs->mrblr, priv->rx_buffer_size);
2174         gfar_write(&regs->maxfrm, priv->rx_buffer_size);
2175
2176         /* If the mtu is larger than the max size for standard
2177          * ethernet frames (ie, a jumbo frame), then set maccfg2
2178          * to allow huge frames, and to check the length */
2179         tempval = gfar_read(&regs->maccfg2);
2180
2181         if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
2182                 tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
2183         else
2184                 tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
2185
2186         gfar_write(&regs->maccfg2, tempval);
2187
2188         if ((oldsize != tempsize) && (dev->flags & IFF_UP))
2189                 startup_gfar(dev);
2190
2191         return 0;
2192 }
2193
2194 /* gfar_reset_task gets scheduled when a packet has not been
2195  * transmitted after a set amount of time.
2196  * For now, assume that clearing out all the structures, and
2197  * starting over will fix the problem.
2198  */
2199 static void gfar_reset_task(struct work_struct *work)
2200 {
2201         struct gfar_private *priv = container_of(work, struct gfar_private,
2202                         reset_task);
2203         struct net_device *dev = priv->ndev;
2204
2205         if (dev->flags & IFF_UP) {
2206                 netif_tx_stop_all_queues(dev);
2207                 stop_gfar(dev);
2208                 startup_gfar(dev);
2209                 netif_tx_start_all_queues(dev);
2210         }
2211
2212         netif_tx_schedule_all(dev);
2213 }
2214
2215 static void gfar_timeout(struct net_device *dev)
2216 {
2217         struct gfar_private *priv = netdev_priv(dev);
2218
2219         dev->stats.tx_errors++;
2220         schedule_work(&priv->reset_task);
2221 }
2222
2223 /* Interrupt Handler for Transmit complete */
2224 static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2225 {
2226         struct net_device *dev = tx_queue->dev;
2227         struct gfar_private *priv = netdev_priv(dev);
2228         struct gfar_priv_rx_q *rx_queue = NULL;
2229         struct txbd8 *bdp;
2230         struct txbd8 *lbdp = NULL;
2231         struct txbd8 *base = tx_queue->tx_bd_base;
2232         struct sk_buff *skb;
2233         int skb_dirtytx;
2234         int tx_ring_size = tx_queue->tx_ring_size;
2235         int frags = 0;
2236         int i;
2237         int howmany = 0;
2238         u32 lstatus;
2239
2240         rx_queue = priv->rx_queue[tx_queue->qindex];
2241         bdp = tx_queue->dirty_tx;
2242         skb_dirtytx = tx_queue->skb_dirtytx;
2243
2244         while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
2245                 unsigned long flags;
2246
2247                 frags = skb_shinfo(skb)->nr_frags;
2248                 lbdp = skip_txbd(bdp, frags, base, tx_ring_size);
2249
2250                 lstatus = lbdp->lstatus;
2251
2252                 /* Only clean completed frames */
2253                 if ((lstatus & BD_LFLAG(TXBD_READY)) &&
2254                                 (lstatus & BD_LENGTH_MASK))
2255                         break;
2256
2257                 dma_unmap_single(&priv->ofdev->dev,
2258                                 bdp->bufPtr,
2259                                 bdp->length,
2260                                 DMA_TO_DEVICE);
2261
2262                 bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2263                 bdp = next_txbd(bdp, base, tx_ring_size);
2264
2265                 for (i = 0; i < frags; i++) {
2266                         dma_unmap_page(&priv->ofdev->dev,
2267                                         bdp->bufPtr,
2268                                         bdp->length,
2269                                         DMA_TO_DEVICE);
2270                         bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2271                         bdp = next_txbd(bdp, base, tx_ring_size);
2272                 }
2273
2274                 /*
2275                  * If there's room in the recycle queue (at most rx_ring_size
2276                  * skbs) and this skb is the right size, add it back into the pool
2277                  */
2278                 if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
2279                                 skb_recycle_check(skb, priv->rx_buffer_size +
2280                                         RXBUF_ALIGNMENT))
2281                         __skb_queue_head(&priv->rx_recycle, skb);
2282                 else
2283                         dev_kfree_skb_any(skb);
2284
2285                 tx_queue->tx_skbuff[skb_dirtytx] = NULL;
2286
2287                 skb_dirtytx = (skb_dirtytx + 1) &
2288                         TX_RING_MOD_MASK(tx_ring_size);
2289
2290                 howmany++;
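                /* Only the num_txbdfree update is shared with
                 * gfar_start_xmit(), so that is the only thing that needs
                 * the txlock here. */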
2291                 spin_lock_irqsave(&tx_queue->txlock, flags);
2292                 tx_queue->num_txbdfree += frags + 1;
2293                 spin_unlock_irqrestore(&tx_queue->txlock, flags);
2294         }
2295
2296         /* If we freed a buffer, we can restart transmission, if necessary */
2297         if (__netif_subqueue_stopped(dev, tx_queue->qindex) && tx_queue->num_txbdfree)
2298                 netif_wake_subqueue(dev, tx_queue->qindex);
2299
2300         /* Update dirty indicators */
2301         tx_queue->skb_dirtytx = skb_dirtytx;
2302         tx_queue->dirty_tx = bdp;
2303
2304         dev->stats.tx_packets += howmany;
2305
2306         return howmany;
2307 }
2308
2309 static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
2310 {
2311         unsigned long flags;
2312
2313         spin_lock_irqsave(&gfargrp->grplock, flags);
2314         if (napi_schedule_prep(&gfargrp->napi)) {
2315                 gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
2316                 __napi_schedule(&gfargrp->napi);
2317         } else {
2318                 /*
2319                  * Clear IEVENT, so interrupts aren't called again
2320                  * because of the packets that have already arrived.
2321                  */
2322                 gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
2323         }
2324         spin_unlock_irqrestore(&gfargrp->grplock, flags);
2325
2326 }
2327
2328 /* Interrupt Handler for Transmit complete */
2329 static irqreturn_t gfar_transmit(int irq, void *grp_id)
2330 {
2331         gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
2332         return IRQ_HANDLED;
2333 }
2334
2335 static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
2336                 struct sk_buff *skb)
2337 {
2338         struct net_device *dev = rx_queue->dev;
2339         struct gfar_private *priv = netdev_priv(dev);
2340         dma_addr_t buf;
2341
2342         buf = dma_map_single(&priv->ofdev->dev, skb->data,
2343                              priv->rx_buffer_size, DMA_FROM_DEVICE);
2344         gfar_init_rxbdp(rx_queue, bdp, buf);
2345 }
2346
2347
2348 struct sk_buff * gfar_new_skb(struct net_device *dev)
2349 {
2350         unsigned int alignamount;
2351         struct gfar_private *priv = netdev_priv(dev);
2352         struct sk_buff *skb = NULL;
2353
2354         skb = __skb_dequeue(&priv->rx_recycle);
2355         if (!skb)
2356                 skb = netdev_alloc_skb(dev,
2357                                 priv->rx_buffer_size + RXBUF_ALIGNMENT);
2358
2359         if (!skb)
2360                 return NULL;
2361
2362         alignamount = RXBUF_ALIGNMENT -
2363                 (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1));
2364
2365         /* We need the data buffer to be aligned properly.  We will reserve
2366          * as many bytes as needed to align the data properly
2367          */
2368         skb_reserve(skb, alignamount);
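        /* Note: if skb->data happened to be aligned already, alignamount is a
         * full RXBUF_ALIGNMENT; that is still safe, since the skb was
         * allocated with RXBUF_ALIGNMENT bytes of slack above rx_buffer_size. */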
2369
2370         return skb;
2371 }
2372
2373 static inline void count_errors(unsigned short status, struct net_device *dev)
2374 {
2375         struct gfar_private *priv = netdev_priv(dev);
2376         struct net_device_stats *stats = &dev->stats;
2377         struct gfar_extra_stats *estats = &priv->extra_stats;
2378
2379         /* If the packet was truncated, none of the other errors
2380          * matter */
2381         if (status & RXBD_TRUNCATED) {
2382                 stats->rx_length_errors++;
2383
2384                 estats->rx_trunc++;
2385
2386                 return;
2387         }
2388         /* Count the errors, if there were any */
2389         if (status & (RXBD_LARGE | RXBD_SHORT)) {
2390                 stats->rx_length_errors++;
2391
2392                 if (status & RXBD_LARGE)
2393                         estats->rx_large++;
2394                 else
2395                         estats->rx_short++;
2396         }
2397         if (status & RXBD_NONOCTET) {
2398                 stats->rx_frame_errors++;
2399                 estats->rx_nonoctet++;
2400         }
2401         if (status & RXBD_CRCERR) {
2402                 estats->rx_crcerr++;
2403                 stats->rx_crc_errors++;
2404         }
2405         if (status & RXBD_OVERRUN) {
2406                 estats->rx_overrun++;
2407                 stats->rx_crc_errors++;
2408         }
2409 }
2410
2411 irqreturn_t gfar_receive(int irq, void *grp_id)
2412 {
2413         gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
2414         return IRQ_HANDLED;
2415 }
2416
2417 static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
2418 {
2419         /* If valid headers were found, and valid sums
2420          * were verified, then we tell the kernel that no
2421          * checksumming is necessary.  Otherwise, it is */
2422         if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
2423                 skb->ip_summed = CHECKSUM_UNNECESSARY;
2424         else
2425                 skb->ip_summed = CHECKSUM_NONE;
2426 }
2427
2428
2429 /* gfar_process_frame() -- handle one incoming packet if skb
2430  * isn't NULL.  */
2431 static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
2432                               int amount_pull)
2433 {
2434         struct gfar_private *priv = netdev_priv(dev);
2435         struct rxfcb *fcb = NULL;
2436
2437         int ret;
2438
2439         /* fcb is at the beginning if it exists */
2440         fcb = (struct rxfcb *)skb->data;
2441
2442         /* Remove the FCB from the skb */
2443         skb_set_queue_mapping(skb, fcb->rq);
2444         /* Remove the padded bytes, if there are any */
2445         if (amount_pull)
2446                 skb_pull(skb, amount_pull);
2447
2448         if (priv->rx_csum_enable)
2449                 gfar_rx_checksum(skb, fcb);
2450
2451         /* Tell the skb what kind of packet this is */
2452         skb->protocol = eth_type_trans(skb, dev);
2453
2454         /* Send the packet up the stack */
2455         if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN)))
2456                 ret = vlan_hwaccel_receive_skb(skb, priv->vlgrp, fcb->vlctl);
2457         else
2458                 ret = netif_receive_skb(skb);
2459
2460         if (NET_RX_DROP == ret)
2461                 priv->extra_stats.kernel_dropped++;
2462
2463         return 0;
2464 }
2465
2466 /* gfar_clean_rx_ring() -- Processes each frame in the rx ring
2467  *   until the budget/quota has been reached. Returns the number
2468  *   of frames handled
2469  */
2470 int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
2471 {
2472         struct net_device *dev = rx_queue->dev;
2473         struct rxbd8 *bdp, *base;
2474         struct sk_buff *skb;
2475         int pkt_len;
2476         int amount_pull;
2477         int howmany = 0;
2478         struct gfar_private *priv = netdev_priv(dev);
2479
2480         /* Get the first full descriptor */
2481         bdp = rx_queue->cur_rx;
2482         base = rx_queue->rx_bd_base;
2483
2484         amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0) +
2485                 priv->padding;
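        /* Each received frame begins with the FCB (when the controller is
         * using one) followed by priv->padding bytes of alignment padding;
         * gfar_process_frame() strips both before handing the skb up. */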
2486
2487         while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
2488                 struct sk_buff *newskb;
2489                 rmb();
2490
2491                 /* Add another skb for the future */
2492                 newskb = gfar_new_skb(dev);
2493
2494                 skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
2495
2496                 dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
2497                                 priv->rx_buffer_size, DMA_FROM_DEVICE);
2498
2499                 /* We drop the frame if we failed to allocate a new buffer */
2500                 if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
2501                                  bdp->status & RXBD_ERR)) {
2502                         count_errors(bdp->status, dev);
2503
2504                         if (unlikely(!newskb))
2505                                 newskb = skb;
2506                         else if (skb) {
2507                                 /*
2508                                  * We need to reset ->data to what it
2509                                  * was before gfar_new_skb() re-aligned
2510                                  * it to an RXBUF_ALIGNMENT boundary
2511                                  * before we put the skb back on the
2512                                  * recycle list.
2513                                  */
2514                                 skb->data = skb->head + NET_SKB_PAD;
2515                                 __skb_queue_head(&priv->rx_recycle, skb);
2516                         }
2517                 } else {
2518                         /* Increment the number of packets */
2519                         dev->stats.rx_packets++;
2520                         howmany++;
2521
2522                         if (likely(skb)) {
2523                                 pkt_len = bdp->length - ETH_FCS_LEN;
2524                                 /* Remove the FCS from the packet length */
2525                                 skb_put(skb, pkt_len);
2526                                 dev->stats.rx_bytes += pkt_len;
2527
2528                                 gfar_process_frame(dev, skb, amount_pull);
2529
2530                         } else {
2531                                 if (netif_msg_rx_err(priv))
2532                                         printk(KERN_WARNING
2533                                                "%s: Missing skb!\n", dev->name);
2534                                 dev->stats.rx_dropped++;
2535                                 priv->extra_stats.rx_skbmissing++;
2536                         }
2537
2538                 }
2539
2540                 rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
2541
2542                 /* Setup the new bdp */
2543                 gfar_new_rxbdp(rx_queue, bdp, newskb);
2544
2545                 /* Update to the next pointer */
2546                 bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
2547
2548                 /* update to point at the next skb */
2549                 rx_queue->skb_currx =
2550                     (rx_queue->skb_currx + 1) &
2551                     RX_RING_MOD_MASK(rx_queue->rx_ring_size);
2552         }
2553
2554         /* Update the current rxbd pointer to be the next one */
2555         rx_queue->cur_rx = bdp;
2556
2557         return howmany;
2558 }
2559
2560 static int gfar_poll(struct napi_struct *napi, int budget)
2561 {
2562         struct gfar_priv_grp *gfargrp = container_of(napi,
2563                         struct gfar_priv_grp, napi);
2564         struct gfar_private *priv = gfargrp->priv;
2565         struct gfar __iomem *regs = gfargrp->regs;
2566         struct gfar_priv_tx_q *tx_queue = NULL;
2567         struct gfar_priv_rx_q *rx_queue = NULL;
2568         int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0;
2569         int tx_cleaned = 0, i, left_over_budget = budget;
2570         unsigned long serviced_queues = 0;
2571         int num_queues = 0;
2572
2573         num_queues = gfargrp->num_rx_queues;
2574         budget_per_queue = budget/num_queues;
2575
2576         /* Clear IEVENT, so interrupts aren't called again
2577          * because of the packets that have already arrived */
2578         gfar_write(&regs->ievent, IEVENT_RTX_MASK);
2579
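        /* Split the budget evenly over this group's rx queues.  Whatever a
         * queue leaves unused is pooled and handed out again on the next
         * pass to the queues that still have work pending. */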
2580         while (num_queues && left_over_budget) {
2581
2582                 budget_per_queue = left_over_budget/num_queues;
2583                 left_over_budget = 0;
2584
2585                 for_each_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
2586                         if (test_bit(i, &serviced_queues))
2587                                 continue;
2588                         rx_queue = priv->rx_queue[i];
2589                         tx_queue = priv->tx_queue[rx_queue->qindex];
2590
2591                         tx_cleaned += gfar_clean_tx_ring(tx_queue);
2592                         rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue,
2593                                                         budget_per_queue);
2594                         rx_cleaned += rx_cleaned_per_queue;
2595                         if (rx_cleaned_per_queue < budget_per_queue) {
2596                                 left_over_budget = left_over_budget +
2597                                         (budget_per_queue - rx_cleaned_per_queue);
2598                                 set_bit(i, &serviced_queues);
2599                                 num_queues--;
2600                         }
2601                 }
2602         }
2603
2604         if (tx_cleaned)
2605                 return budget;
2606
2607         if (rx_cleaned < budget) {
2608                 napi_complete(napi);
2609
2610                 /* Clear the halt bit in RSTAT */
2611                 gfar_write(&regs->rstat, gfargrp->rstat);
2612
2613                 gfar_write(&regs->imask, IMASK_DEFAULT);
2614
2615                 /* If we are coalescing interrupts, update the timer */
2616                 /* Otherwise, clear it */
2617                 gfar_configure_coalescing(priv,
2618                                 gfargrp->rx_bit_map, gfargrp->tx_bit_map);
2619         }
2620
2621         return rx_cleaned;
2622 }
2623
2624 #ifdef CONFIG_NET_POLL_CONTROLLER
2625 /*
2626  * Polling 'interrupt' - used by things like netconsole to send skbs
2627  * without having to re-enable interrupts. It's not called while
2628  * the interrupt routine is executing.
2629  */
2630 static void gfar_netpoll(struct net_device *dev)
2631 {
2632         struct gfar_private *priv = netdev_priv(dev);
2633         int i = 0;
2634
2635         /* If the device has multiple interrupts, run tx/rx */
2636         if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2637                 for (i = 0; i < priv->num_grps; i++) {
2638                         disable_irq(priv->gfargrp[i].interruptTransmit);
2639                         disable_irq(priv->gfargrp[i].interruptReceive);
2640                         disable_irq(priv->gfargrp[i].interruptError);
2641                         gfar_interrupt(priv->gfargrp[i].interruptTransmit,
2642                                                 &priv->gfargrp[i]);
2643                         enable_irq(priv->gfargrp[i].interruptError);
2644                         enable_irq(priv->gfargrp[i].interruptReceive);
2645                         enable_irq(priv->gfargrp[i].interruptTransmit);
2646                 }
2647         } else {
2648                 for (i = 0; i < priv->num_grps; i++) {
2649                         disable_irq(priv->gfargrp[i].interruptTransmit);
2650                         gfar_interrupt(priv->gfargrp[i].interruptTransmit,
2651                                                 &priv->gfargrp[i]);
2652                         enable_irq(priv->gfargrp[i].interruptTransmit);
2653                 }
2654         }
2655 }
2656 #endif
2657
2658 /* The interrupt handler for devices with one interrupt */
2659 static irqreturn_t gfar_interrupt(int irq, void *grp_id)
2660 {
2661         struct gfar_priv_grp *gfargrp = grp_id;
2662
2663         /* Save ievent for future reference */
2664         u32 events = gfar_read(&gfargrp->regs->ievent);
2665
2666         /* Check for reception */
2667         if (events & IEVENT_RX_MASK)
2668                 gfar_receive(irq, grp_id);
2669
2670         /* Check for transmit completion */
2671         if (events & IEVENT_TX_MASK)
2672                 gfar_transmit(irq, grp_id);
2673
2674         /* Check for errors */
2675         if (events & IEVENT_ERR_MASK)
2676                 gfar_error(irq, grp_id);
2677
2678         return IRQ_HANDLED;
2679 }
2680
2681 /* Called every time the controller might need to be made
2682  * aware of new link state.  The PHY code conveys this
2683  * information through variables in the phydev structure, and this
2684  * function converts those variables into the appropriate
2685  * register values, and can bring down the device if needed.
2686  */
2687 static void adjust_link(struct net_device *dev)
2688 {
2689         struct gfar_private *priv = netdev_priv(dev);
2690         struct gfar __iomem *regs = priv->gfargrp[0].regs;
2691         unsigned long flags;
2692         struct phy_device *phydev = priv->phydev;
2693         int new_state = 0;
2694
2695         local_irq_save(flags);
2696         lock_tx_qs(priv);
2697
2698         if (phydev->link) {
2699                 u32 tempval = gfar_read(&regs->maccfg2);
2700                 u32 ecntrl = gfar_read(&regs->ecntrl);
2701
2702                 /* Now we make sure that we can be in full duplex mode.
2703                  * If not, we operate in half-duplex mode. */
2704                 if (phydev->duplex != priv->oldduplex) {
2705                         new_state = 1;
2706                         if (!(phydev->duplex))
2707                                 tempval &= ~(MACCFG2_FULL_DUPLEX);
2708                         else
2709                                 tempval |= MACCFG2_FULL_DUPLEX;
2710
2711                         priv->oldduplex = phydev->duplex;
2712                 }
2713
2714                 if (phydev->speed != priv->oldspeed) {
2715                         new_state = 1;
2716                         switch (phydev->speed) {
2717                         case 1000:
2718                                 tempval =
2719                                     ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
2720
2721                                 ecntrl &= ~(ECNTRL_R100);
2722                                 break;
2723                         case 100:
2724                         case 10:
2725                                 tempval =
2726                                     ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
2727
2728                                 /* Reduced mode distinguishes
2729                                  * between 10 and 100 */
2730                                 if (phydev->speed == SPEED_100)
2731                                         ecntrl |= ECNTRL_R100;
2732                                 else
2733                                         ecntrl &= ~(ECNTRL_R100);
2734                                 break;
2735                         default:
2736                                 if (netif_msg_link(priv))
2737                                         printk(KERN_WARNING
2738                                                 "%s: Ack!  Speed (%d) is not 10/100/1000!\n",
2739                                                 dev->name, phydev->speed);
2740                                 break;
2741                         }
2742
2743                         priv->oldspeed = phydev->speed;
2744                 }
2745
2746                 gfar_write(&regs->maccfg2, tempval);
2747                 gfar_write(&regs->ecntrl, ecntrl);
2748
2749                 if (!priv->oldlink) {
2750                         new_state = 1;
2751                         priv->oldlink = 1;
2752                 }
2753         } else if (priv->oldlink) {
2754                 new_state = 1;
2755                 priv->oldlink = 0;
2756                 priv->oldspeed = 0;
2757                 priv->oldduplex = -1;
2758         }
2759
2760         if (new_state && netif_msg_link(priv))
2761                 phy_print_status(phydev);
2762         unlock_tx_qs(priv);
2763         local_irq_restore(flags);
2764 }
2765
2766 /* Update the hash table based on the current list of multicast
2767  * addresses we subscribe to.  Also, change the promiscuity of
2768  * the device based on the flags (this function is called
2769  * whenever dev->flags is changed */
2770 static void gfar_set_multi(struct net_device *dev)
2771 {
2772         struct dev_mc_list *mc_ptr;
2773         struct gfar_private *priv = netdev_priv(dev);
2774         struct gfar __iomem *regs = priv->gfargrp[0].regs;
2775         u32 tempval;
2776
2777         if (dev->flags & IFF_PROMISC) {
2778                 /* Set RCTRL to PROM */
2779                 tempval = gfar_read(&regs->rctrl);
2780                 tempval |= RCTRL_PROM;
2781                 gfar_write(&regs->rctrl, tempval);
2782         } else {
2783                 /* Set RCTRL to not PROM */
2784                 tempval = gfar_read(&regs->rctrl);
2785                 tempval &= ~(RCTRL_PROM);
2786                 gfar_write(&regs->rctrl, tempval);
2787         }
2788
2789         if (dev->flags & IFF_ALLMULTI) {
2790                 /* Set the hash to rx all multicast frames */
2791                 gfar_write(&regs->igaddr0, 0xffffffff);
2792                 gfar_write(&regs->igaddr1, 0xffffffff);
2793                 gfar_write(&regs->igaddr2, 0xffffffff);
2794                 gfar_write(&regs->igaddr3, 0xffffffff);
2795                 gfar_write(&regs->igaddr4, 0xffffffff);
2796                 gfar_write(&regs->igaddr5, 0xffffffff);
2797                 gfar_write(&regs->igaddr6, 0xffffffff);
2798                 gfar_write(&regs->igaddr7, 0xffffffff);
2799                 gfar_write(&regs->gaddr0, 0xffffffff);
2800                 gfar_write(&regs->gaddr1, 0xffffffff);
2801                 gfar_write(&regs->gaddr2, 0xffffffff);
2802                 gfar_write(&regs->gaddr3, 0xffffffff);
2803                 gfar_write(&regs->gaddr4, 0xffffffff);
2804                 gfar_write(&regs->gaddr5, 0xffffffff);
2805                 gfar_write(&regs->gaddr6, 0xffffffff);
2806                 gfar_write(&regs->gaddr7, 0xffffffff);
2807         } else {
2808                 int em_num;
2809                 int idx;
2810
2811                 /* zero out the hash */
2812                 gfar_write(&regs->igaddr0, 0x0);
2813                 gfar_write(&regs->igaddr1, 0x0);
2814                 gfar_write(&regs->igaddr2, 0x0);
2815                 gfar_write(&regs->igaddr3, 0x0);
2816                 gfar_write(&regs->igaddr4, 0x0);
2817                 gfar_write(&regs->igaddr5, 0x0);
2818                 gfar_write(&regs->igaddr6, 0x0);
2819                 gfar_write(&regs->igaddr7, 0x0);
2820                 gfar_write(&regs->gaddr0, 0x0);
2821                 gfar_write(&regs->gaddr1, 0x0);
2822                 gfar_write(&regs->gaddr2, 0x0);
2823                 gfar_write(&regs->gaddr3, 0x0);
2824                 gfar_write(&regs->gaddr4, 0x0);
2825                 gfar_write(&regs->gaddr5, 0x0);
2826                 gfar_write(&regs->gaddr6, 0x0);
2827                 gfar_write(&regs->gaddr7, 0x0);
2828
2829                 /* If we have extended hash tables, we need to
2830                  * clear the exact match registers to prepare for
2831                  * setting them */
2832                 if (priv->extended_hash) {
2833                         em_num = GFAR_EM_NUM + 1;
2834                         gfar_clear_exact_match(dev);
2835                         idx = 1;
2836                 } else {
2837                         idx = 0;
2838                         em_num = 0;
2839                 }
2840
2841                 if (dev->mc_count == 0)
2842                         return;
2843
2844                 /* Parse the list, and set the appropriate bits */
2845                 for (mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
2846                         if (idx < em_num) {
2847                                 gfar_set_mac_for_addr(dev, idx,
2848                                                 mc_ptr->dmi_addr);
2849                                 idx++;
2850                         } else
2851                                 gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr);
2852                 }
2853         }
2854
2855         return;
2856 }
2857
2858
2859 /* Clears each of the exact match registers to zero, so they
2860  * don't interfere with normal reception */
2861 static void gfar_clear_exact_match(struct net_device *dev)
2862 {
2863         int idx;
2864         u8 zero_arr[MAC_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
2865
2866         for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
2867                 gfar_set_mac_for_addr(dev, idx, (u8 *)zero_arr);
2868 }
2869
2870 /* Set the appropriate hash bit for the given addr */
2871 /* The algorithm works like so:
2872  * 1) Take the Destination Address (ie the multicast address), and
2873  * do a CRC on it (little endian), and reverse the bits of the
2874  * result.
2875  * 2) Use the 8 most significant bits as a hash into a 256-entry
2876  * table.  The table is controlled through 8 32-bit registers:
2877  * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
2878  * entry 255.  This means that the 3 most significant bits of the
2879  * hash index select which gaddr register to use, and the 5 other bits
2880  * indicate which bit (assuming an IBM numbering scheme, which
2881  * for PowerPC (tm) is usually the case) in the register holds
2882  * the entry. */
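/* Worked example (assuming hash_width == 8): if the top byte of the
 * CRC result is 0xb7 (1011 0111b), then whichreg = 0xb7 >> 5 = 5 and
 * whichbit = 0xb7 & 0x1f = 23, so the value 1 << (31 - 23) is ORed
 * into the register at hash_regs[5] (bit 23 in IBM numbering). */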
2883 static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
2884 {
2885         u32 tempval;
2886         struct gfar_private *priv = netdev_priv(dev);
2887         u32 result = ether_crc(MAC_ADDR_LEN, addr);
2888         int width = priv->hash_width;
2889         u8 whichbit = (result >> (32 - width)) & 0x1f;
2890         u8 whichreg = result >> (32 - width + 5);
2891         u32 value = (1 << (31-whichbit));
2892
2893         tempval = gfar_read(priv->hash_regs[whichreg]);
2894         tempval |= value;
2895         gfar_write(priv->hash_regs[whichreg], tempval);
2898 }
2899
2900
2901 /* There are multiple MAC address register pairs on some controllers.
2902  * This function sets the num'th pair to the given address.
2903  */
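/* Each pair occupies two consecutive 32-bit registers starting at
 * macstnaddr1, which is why macptr is advanced by num * 2 below. */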
2904 static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
2905 {
2906         struct gfar_private *priv = netdev_priv(dev);
2907         struct gfar __iomem *regs = priv->gfargrp[0].regs;
2908         int idx;
2909         char tmpbuf[2 * sizeof(u32)] = { 0 }; /* padded so the second u32 read stays in bounds */
2910         u32 tempval;
2911         u32 __iomem *macptr = &regs->macstnaddr1;
2912
2913         macptr += num*2;
2914
2915         /* Copy the address into the MAC registers in reverse byte
2916          * order, which is how the controller stores it */
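        /* Illustrative example on a big-endian host: for the address
         * 00:04:9f:01:02:03, tmpbuf becomes 03 02 01 9f 04 00 00 00,
         * so 0x0302019f lands in the first register of the pair and
         * 0x04000000 in the second. */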
2917         for (idx = 0; idx < MAC_ADDR_LEN; idx++)
2918                 tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx];
2919
2920         gfar_write(macptr, *((u32 *) (tmpbuf)));
2921
2922         tempval = *((u32 *) (tmpbuf + 4));
2923
2924         gfar_write(macptr+1, tempval);
2925 }
2926
2927 /* GFAR error interrupt handler */
2928 static irqreturn_t gfar_error(int irq, void *grp_id)
2929 {
2930         struct gfar_priv_grp *gfargrp = grp_id;
2931         struct gfar __iomem *regs = gfargrp->regs;
2932         struct gfar_private *priv = gfargrp->priv;
2933         struct net_device *dev = priv->ndev;
2934
2935         /* Save ievent for future reference */
2936         u32 events = gfar_read(&regs->ievent);
2937
2938         /* Ack only the error bits in IEVENT; completion events are handled elsewhere */
2939         gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
2940
2941         /* Magic Packet is not an error. */
2942         if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
2943             (events & IEVENT_MAG))
2944                 events &= ~IEVENT_MAG;
2945
2946         /* Report the error interrupt if error messaging is enabled */
2947         if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
2948                 printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
2949                        dev->name, events, gfar_read(&regs->imask));
2950
2951         /* Update the error counters */
2952         if (events & IEVENT_TXE) {
2953                 dev->stats.tx_errors++;
2954
2955                 if (events & IEVENT_LC)
2956                         dev->stats.tx_window_errors++;
2957                 if (events & IEVENT_CRL)
2958                         dev->stats.tx_aborted_errors++;
2959                 if (events & IEVENT_XFUN) {
2960                         unsigned long flags;
2961
2962                         if (netif_msg_tx_err(priv))
2963                                 printk(KERN_DEBUG "%s: TX FIFO underrun, "
2964                                        "packet dropped.\n", dev->name);
2965                         dev->stats.tx_dropped++;
2966                         priv->extra_stats.tx_underrun++;
2967
2968                         local_irq_save(flags);
2969                         lock_tx_qs(priv);
2970
2971                         /* Reactivate the Tx Queues by clearing the transmit halt bits */
2972                         gfar_write(&regs->tstat, gfargrp->tstat);
2973
2974                         unlock_tx_qs(priv);
2975                         local_irq_restore(flags);
2976                 }
2977                 if (netif_msg_tx_err(priv))
2978                         printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
2979         }
2980         if (events & IEVENT_BSY) {
2981                 dev->stats.rx_errors++;
2982                 priv->extra_stats.rx_bsy++;
2983
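                /* BSY means a frame was dropped because no RX buffer
                 * was available; kick the receive path so the ring is
                 * drained and refilled. */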
2984                 gfar_receive(irq, grp_id);
2985
2986                 if (netif_msg_rx_err(priv))
2987                         printk(KERN_DEBUG "%s: busy error (rstat: %x)\n",
2988                                dev->name, gfar_read(&regs->rstat));
2989         }
2990         if (events & IEVENT_BABR) {
2991                 dev->stats.rx_errors++;
2992                 priv->extra_stats.rx_babr++;
2993
2994                 if (netif_msg_rx_err(priv))
2995                         printk(KERN_DEBUG "%s: babbling RX error\n", dev->name);
2996         }
2997         if (events & IEVENT_EBERR) {
2998                 priv->extra_stats.eberr++;
2999                 if (netif_msg_rx_err(priv))
3000                         printk(KERN_DEBUG "%s: bus error\n", dev->name);
3001         }
3002         if ((events & IEVENT_RXC) && netif_msg_rx_status(priv))
3003                 printk(KERN_DEBUG "%s: control frame\n", dev->name);
3004
3005         if (events & IEVENT_BABT) {
3006                 priv->extra_stats.tx_babt++;
3007                 if (netif_msg_tx_err(priv))
3008                         printk(KERN_DEBUG "%s: babbling TX error\n", dev->name);
3009         }
3010         return IRQ_HANDLED;
3011 }
3012
3013 static struct of_device_id gfar_match[] = {
3015         {
3016                 .type = "network",
3017                 .compatible = "gianfar",
3018         },
3019         {
3020                 .compatible = "fsl,etsec2",
3021         },
3022         {},
3023 };
3024 MODULE_DEVICE_TABLE(of, gfar_match);
3025
3026 /* Structure for a device driver */
3027 static struct of_platform_driver gfar_driver = {
3028         .name = "fsl-gianfar",
3029         .match_table = gfar_match,
3030
3031         .probe = gfar_probe,
3032         .remove = gfar_remove,
3033         .suspend = gfar_legacy_suspend,
3034         .resume = gfar_legacy_resume,
3035         .driver.pm = GFAR_PM_OPS,
3036 };
3037
3038 static int __init gfar_init(void)
3039 {
3040         return of_register_platform_driver(&gfar_driver);
3041 }
3042
3043 static void __exit gfar_exit(void)
3044 {
3045         of_unregister_platform_driver(&gfar_driver);
3046 }
3047
3048 module_init(gfar_init);
3049 module_exit(gfar_exit);
3050