netdev: Add netdev->addr_list_lock protection.
[linux-2.6.git] / drivers / net / ibm_newemac / core.c
1 /*
2  * drivers/net/ibm_newemac/core.c
3  *
4  * Driver for PowerPC 4xx on-chip ethernet controller.
5  *
6  * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
7  *                <benh@kernel.crashing.org>
8  *
9  * Based on the arch/ppc version of the driver:
10  *
11  * Copyright (c) 2004, 2005 Zultys Technologies.
12  * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
13  *
14  * Based on original work by
15  *      Matt Porter <mporter@kernel.crashing.org>
16  *      (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
17  *      Armin Kuster <akuster@mvista.com>
18  *      Johnnie Peters <jpeters@mvista.com>
19  *
20  * This program is free software; you can redistribute  it and/or modify it
21  * under  the terms of  the GNU General  Public License as published by the
22  * Free Software Foundation;  either version 2 of the  License, or (at your
23  * option) any later version.
24  *
25  */
26
27 #include <linux/sched.h>
28 #include <linux/string.h>
29 #include <linux/errno.h>
30 #include <linux/delay.h>
31 #include <linux/types.h>
32 #include <linux/pci.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/crc32.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #include <linux/bitops.h>
39 #include <linux/workqueue.h>
40 #include <linux/of.h>
41
42 #include <asm/processor.h>
43 #include <asm/io.h>
44 #include <asm/dma.h>
45 #include <asm/uaccess.h>
46 #include <asm/dcr.h>
47 #include <asm/dcr-regs.h>
48
49 #include "core.h"
50
51 /*
52  * Lack of dma_unmap_???? calls is intentional.
53  *
54  * API-correct usage requires additional support state information to be
55  * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
56  * EMAC design (e.g. TX buffer passed from network stack can be split into
57  * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
58  * maintaining such information will add additional overhead.
59  * Current DMA API implementation for 4xx processors only ensures cache coherency
60  * and dma_unmap_???? routines are empty and are likely to stay this way.
61  * I decided to omit dma_unmap_??? calls because I don't want to add additional
62  * complexity just for the sake of following some abstract API, when it doesn't
63  * add any real benefit to the driver. I understand that this decision maybe
64  * controversial, but I really tried to make code API-correct and efficient
65  * at the same time and didn't come up with code I liked :(.                --ebs
66  */
67
68 #define DRV_NAME        "emac"
69 #define DRV_VERSION     "3.54"
70 #define DRV_DESC        "PPC 4xx OCP EMAC driver"
71
72 MODULE_DESCRIPTION(DRV_DESC);
73 MODULE_AUTHOR
74     ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
75 MODULE_LICENSE("GPL");
76
77 /*
78  * PPC64 doesn't (yet) have a cacheable_memcpy
79  */
80 #ifdef CONFIG_PPC64
81 #define cacheable_memcpy(d,s,n) memcpy((d),(s),(n))
82 #endif
83
84 /* minimum number of free TX descriptors required to wake up TX process */
85 #define EMAC_TX_WAKEUP_THRESH           (NUM_TX_BUFF / 4)
86
87 /* If packet size is less than this number, we allocate small skb and copy packet
88  * contents into it instead of just sending original big skb up
89  */
90 #define EMAC_RX_COPY_THRESH             CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD
91
92 /* Since multiple EMACs share MDIO lines in various ways, we need
93  * to avoid re-using the same PHY ID in cases where the arch didn't
94  * setup precise phy_map entries
95  *
96  * XXX This is something that needs to be reworked as we can have multiple
97  * EMAC "sets" (multiple ASICs containing several EMACs) though we can
98  * probably require in that case to have explicit PHY IDs in the device-tree
99  */
100 static u32 busy_phy_map;
101 static DEFINE_MUTEX(emac_phy_map_lock);
102
103 /* This is the wait queue used to wait on any event related to probe, that
104  * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
105  */
106 static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);
107
108 /* Having stable interface names is a doomed idea. However, it would be nice
109  * if we didn't have completely random interface names at boot too :-) It's
110  * just a matter of making everybody's life easier. Since we are doing
111  * threaded probing, it's a bit harder though. The base idea here is that
112  * we make up a list of all emacs in the device-tree before we register the
113  * driver. Every emac will then wait for the previous one in the list to
114  * initialize before itself. We should also keep that list ordered by
115  * cell_index.
116  * That list is only 4 entries long, meaning that additional EMACs don't
117  * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
118  */
119
120 #define EMAC_BOOT_LIST_SIZE     4
121 static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];
122
123 /* How long should I wait for dependent devices ? */
124 #define EMAC_PROBE_DEP_TIMEOUT  (HZ * 5)
125
126 /* I don't want to litter system log with timeout errors
127  * when we have brain-damaged PHY.
128  */
129 static inline void emac_report_timeout_error(struct emac_instance *dev,
130                                              const char *error)
131 {
132         if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX |
133                                   EMAC_FTR_440EP_PHY_CLK_FIX))
134                 DBG(dev, "%s" NL, error);
135         else if (net_ratelimit())
136                 printk(KERN_ERR "%s: %s\n", dev->ndev->name, error);
137 }
138
139 /* EMAC PHY clock workaround:
140  * 440EP/440GR has more sane SDR0_MFR register implementation than 440GX,
141  * which allows controlling each EMAC clock
142  */
/* 440EP/440GR PHY clock workaround: set this EMAC's ECS bit in SDR0_MFR
 * via a native DCR read-modify-write (clear mask 0, set mask ECS>>index).
 * Presumably this routes the RX clock from the TX clock source, as the
 * name suggests -- confirm against the 440EP user manual.  No-op when
 * the feature bit is absent or native DCR access is not configured.
 */
static inline void emac_rx_clk_tx(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
        if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
                dcri_clrset(SDR0, SDR0_MFR,
                            0, SDR0_MFR_ECS >> dev->cell_index);
#endif
}
151
/* Undo emac_rx_clk_tx: clear this EMAC's ECS bit in SDR0_MFR (clear mask
 * ECS>>index, set mask 0), restoring the default RX clock source.
 * No-op when the 440EP feature bit is absent or native DCR access is
 * not configured.
 */
static inline void emac_rx_clk_default(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
        if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
                dcri_clrset(SDR0, SDR0_MFR,
                            SDR0_MFR_ECS >> dev->cell_index, 0);
#endif
}
160
161 /* PHY polling intervals */
162 #define PHY_POLL_LINK_ON        HZ
163 #define PHY_POLL_LINK_OFF       (HZ / 5)
164
165 /* Graceful stop timeouts in us.
166  * We should allow up to 1 frame time (full-duplex, ignoring collisions)
167  */
168 #define STOP_TIMEOUT_10         1230
169 #define STOP_TIMEOUT_100        124
170 #define STOP_TIMEOUT_1000       13
171 #define STOP_TIMEOUT_1000_JUMBO 73
172
173 static unsigned char default_mcast_addr[] = {
174         0x01, 0x80, 0xC2, 0x00, 0x00, 0x01
175 };
176
177 /* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
178 static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
179         "rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
180         "tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
181         "rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
182         "rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
183         "rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
184         "rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
185         "rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
186         "rx_bad_packet", "rx_runt_packet", "rx_short_event",
187         "rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
188         "rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
189         "tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
190         "tx_bd_excessive_collisions", "tx_bd_late_collision",
191         "tx_bd_multple_collisions", "tx_bd_single_collision",
192         "tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
193         "tx_errors"
194 };
195
196 static irqreturn_t emac_irq(int irq, void *dev_instance);
197 static void emac_clean_tx_ring(struct emac_instance *dev);
198 static void __emac_set_multicast_list(struct emac_instance *dev);
199
200 static inline int emac_phy_supports_gige(int phy_mode)
201 {
202         return  phy_mode == PHY_MODE_GMII ||
203                 phy_mode == PHY_MODE_RGMII ||
204                 phy_mode == PHY_MODE_TBI ||
205                 phy_mode == PHY_MODE_RTBI;
206 }
207
208 static inline int emac_phy_gpcs(int phy_mode)
209 {
210         return  phy_mode == PHY_MODE_TBI ||
211                 phy_mode == PHY_MODE_RTBI;
212 }
213
214 static inline void emac_tx_enable(struct emac_instance *dev)
215 {
216         struct emac_regs __iomem *p = dev->emacp;
217         u32 r;
218
219         DBG(dev, "tx_enable" NL);
220
221         r = in_be32(&p->mr0);
222         if (!(r & EMAC_MR0_TXE))
223                 out_be32(&p->mr0, r | EMAC_MR0_TXE);
224 }
225
226 static void emac_tx_disable(struct emac_instance *dev)
227 {
228         struct emac_regs __iomem *p = dev->emacp;
229         u32 r;
230
231         DBG(dev, "tx_disable" NL);
232
233         r = in_be32(&p->mr0);
234         if (r & EMAC_MR0_TXE) {
235                 int n = dev->stop_timeout;
236                 out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
237                 while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
238                         udelay(1);
239                         --n;
240                 }
241                 if (unlikely(!n))
242                         emac_report_timeout_error(dev, "TX disable timeout");
243         }
244 }
245
246 static void emac_rx_enable(struct emac_instance *dev)
247 {
248         struct emac_regs __iomem *p = dev->emacp;
249         u32 r;
250
251         if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
252                 goto out;
253
254         DBG(dev, "rx_enable" NL);
255
256         r = in_be32(&p->mr0);
257         if (!(r & EMAC_MR0_RXE)) {
258                 if (unlikely(!(r & EMAC_MR0_RXI))) {
259                         /* Wait if previous async disable is still in progress */
260                         int n = dev->stop_timeout;
261                         while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
262                                 udelay(1);
263                                 --n;
264                         }
265                         if (unlikely(!n))
266                                 emac_report_timeout_error(dev,
267                                                           "RX disable timeout");
268                 }
269                 out_be32(&p->mr0, r | EMAC_MR0_RXE);
270         }
271  out:
272         ;
273 }
274
275 static void emac_rx_disable(struct emac_instance *dev)
276 {
277         struct emac_regs __iomem *p = dev->emacp;
278         u32 r;
279
280         DBG(dev, "rx_disable" NL);
281
282         r = in_be32(&p->mr0);
283         if (r & EMAC_MR0_RXE) {
284                 int n = dev->stop_timeout;
285                 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
286                 while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
287                         udelay(1);
288                         --n;
289                 }
290                 if (unlikely(!n))
291                         emac_report_timeout_error(dev, "RX disable timeout");
292         }
293 }
294
295 static inline void emac_netif_stop(struct emac_instance *dev)
296 {
297         netif_tx_lock_bh(dev->ndev);
298         netif_addr_lock(dev->ndev);
299         dev->no_mcast = 1;
300         netif_addr_unlock(dev->ndev);
301         netif_tx_unlock_bh(dev->ndev);
302         dev->ndev->trans_start = jiffies;       /* prevent tx timeout */
303         mal_poll_disable(dev->mal, &dev->commac);
304         netif_tx_disable(dev->ndev);
305 }
306
/* Re-open the interface after emac_netif_stop: allow multicast updates
 * again, apply any multicast change that was deferred while stopped,
 * wake the TX queue and re-enable NAPI polling.  Lock order mirrors
 * emac_netif_stop.
 */
static inline void emac_netif_start(struct emac_instance *dev)
{
        netif_tx_lock_bh(dev->ndev);
        netif_addr_lock(dev->ndev);
        dev->no_mcast = 0;
        /* Flush a multicast list change deferred by no_mcast */
        if (dev->mcast_pending && netif_running(dev->ndev))
                __emac_set_multicast_list(dev);
        netif_addr_unlock(dev->ndev);
        netif_tx_unlock_bh(dev->ndev);

        netif_wake_queue(dev->ndev);

        /* NOTE: unconditional netif_wake_queue is only appropriate
         * so long as all callers are assured to have free tx slots
         * (taken from tg3... though the case where that is wrong is
         *  not terribly harmful)
         */
        mal_poll_enable(dev->mal, &dev->commac);
}
326
327 static inline void emac_rx_disable_async(struct emac_instance *dev)
328 {
329         struct emac_regs __iomem *p = dev->emacp;
330         u32 r;
331
332         DBG(dev, "rx_disable_async" NL);
333
334         r = in_be32(&p->mr0);
335         if (r & EMAC_MR0_RXE)
336                 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
337 }
338
339 static int emac_reset(struct emac_instance *dev)
340 {
341         struct emac_regs __iomem *p = dev->emacp;
342         int n = 20;
343
344         DBG(dev, "reset" NL);
345
346         if (!dev->reset_failed) {
347                 /* 40x erratum suggests stopping RX channel before reset,
348                  * we stop TX as well
349                  */
350                 emac_rx_disable(dev);
351                 emac_tx_disable(dev);
352         }
353
354         out_be32(&p->mr0, EMAC_MR0_SRST);
355         while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
356                 --n;
357
358         if (n) {
359                 dev->reset_failed = 0;
360                 return 0;
361         } else {
362                 emac_report_timeout_error(dev, "reset timeout");
363                 dev->reset_failed = 1;
364                 return -ETIMEDOUT;
365         }
366 }
367
368 static void emac_hash_mc(struct emac_instance *dev)
369 {
370         struct emac_regs __iomem *p = dev->emacp;
371         u16 gaht[4] = { 0 };
372         struct dev_mc_list *dmi;
373
374         DBG(dev, "hash_mc %d" NL, dev->ndev->mc_count);
375
376         for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
377                 int bit;
378                 DBG2(dev, "mc %02x:%02x:%02x:%02x:%02x:%02x" NL,
379                      dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
380                      dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);
381
382                 bit = 63 - (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 26);
383                 gaht[bit >> 4] |= 0x8000 >> (bit & 0x0f);
384         }
385         out_be32(&p->gaht1, gaht[0]);
386         out_be32(&p->gaht2, gaht[1]);
387         out_be32(&p->gaht3, gaht[2]);
388         out_be32(&p->gaht4, gaht[3]);
389 }
390
391 static inline u32 emac_iff2rmr(struct net_device *ndev)
392 {
393         struct emac_instance *dev = netdev_priv(ndev);
394         u32 r;
395
396         r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;
397
398         if (emac_has_feature(dev, EMAC_FTR_EMAC4))
399             r |= EMAC4_RMR_BASE;
400         else
401             r |= EMAC_RMR_BASE;
402
403         if (ndev->flags & IFF_PROMISC)
404                 r |= EMAC_RMR_PME;
405         else if (ndev->flags & IFF_ALLMULTI || ndev->mc_count > 32)
406                 r |= EMAC_RMR_PMME;
407         else if (ndev->mc_count > 0)
408                 r |= EMAC_RMR_MAE;
409
410         return r;
411 }
412
413 static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
414 {
415         u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;
416
417         DBG2(dev, "__emac_calc_base_mr1" NL);
418
419         switch(tx_size) {
420         case 2048:
421                 ret |= EMAC_MR1_TFS_2K;
422                 break;
423         default:
424                 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
425                        dev->ndev->name, tx_size);
426         }
427
428         switch(rx_size) {
429         case 16384:
430                 ret |= EMAC_MR1_RFS_16K;
431                 break;
432         case 4096:
433                 ret |= EMAC_MR1_RFS_4K;
434                 break;
435         default:
436                 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
437                        dev->ndev->name, rx_size);
438         }
439
440         return ret;
441 }
442
443 static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
444 {
445         u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
446                 EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);
447
448         DBG2(dev, "__emac4_calc_base_mr1" NL);
449
450         switch(tx_size) {
451         case 4096:
452                 ret |= EMAC4_MR1_TFS_4K;
453                 break;
454         case 2048:
455                 ret |= EMAC4_MR1_TFS_2K;
456                 break;
457         default:
458                 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
459                        dev->ndev->name, tx_size);
460         }
461
462         switch(rx_size) {
463         case 16384:
464                 ret |= EMAC4_MR1_RFS_16K;
465                 break;
466         case 4096:
467                 ret |= EMAC4_MR1_RFS_4K;
468                 break;
469         case 2048:
470                 ret |= EMAC4_MR1_RFS_2K;
471                 break;
472         default:
473                 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
474                        dev->ndev->name, rx_size);
475         }
476
477         return ret;
478 }
479
480 static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
481 {
482         return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
483                 __emac4_calc_base_mr1(dev, tx_size, rx_size) :
484                 __emac_calc_base_mr1(dev, tx_size, rx_size);
485 }
486
487 static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
488 {
489         if (emac_has_feature(dev, EMAC_FTR_EMAC4))
490                 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
491         else
492                 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
493 }
494
495 static inline u32 emac_calc_rwmr(struct emac_instance *dev,
496                                  unsigned int low, unsigned int high)
497 {
498         if (emac_has_feature(dev, EMAC_FTR_EMAC4))
499                 return (low << 22) | ( (high & 0x3ff) << 6);
500         else
501                 return (low << 23) | ( (high & 0x1ff) << 7);
502 }
503
/* Program the whole EMAC for the current link state: MR1 mode bits,
 * FIFO sizes and thresholds, MAC address, VLAN TPID, receive mode and
 * interrupt mask.  With no carrier the MAC is put in internal loopback
 * instead of being reset.  Returns 0 on success, -ETIMEDOUT if the
 * chip reset fails.  The ordering of register writes follows the
 * hardware bring-up sequence and should not be rearranged.
 */
static int emac_configure(struct emac_instance *dev)
{
        struct emac_regs __iomem *p = dev->emacp;
        struct net_device *ndev = dev->ndev;
        int tx_size, rx_size, link = netif_carrier_ok(dev->ndev);
        u32 r, mr1 = 0;

        DBG(dev, "configure" NL);

        if (!link) {
                /* No carrier: force full-duplex internal loopback
                 * rather than resetting the chip */
                out_be32(&p->mr1, in_be32(&p->mr1)
                         | EMAC_MR1_FDE | EMAC_MR1_ILE);
                udelay(100);
        } else if (emac_reset(dev) < 0)
                return -ETIMEDOUT;

        if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
                tah_reset(dev->tah_dev);

        DBG(dev, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
            link, dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);

        /* Default fifo sizes */
        tx_size = dev->tx_fifo_size;
        rx_size = dev->rx_fifo_size;

        /* No link, force loopback */
        if (!link)
                mr1 = EMAC_MR1_FDE | EMAC_MR1_ILE;

        /* Check for full duplex */
        else if (dev->phy.duplex == DUPLEX_FULL)
                mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;

        /* Adjust fifo sizes, mr1 and timeouts based on link speed */
        dev->stop_timeout = STOP_TIMEOUT_10;
        switch (dev->phy.speed) {
        case SPEED_1000:
                if (emac_phy_gpcs(dev->phy.mode)) {
                        mr1 |= EMAC_MR1_MF_1000GPCS |
                                EMAC_MR1_MF_IPPA(dev->phy.address);

                        /* Put some arbitrary OUI, Manuf & Rev IDs so we can
                         * identify this GPCS PHY later.
                         */
                        out_be32(&p->ipcr, 0xdeadbeef);
                } else
                        mr1 |= EMAC_MR1_MF_1000;

                /* Extended fifo sizes */
                tx_size = dev->tx_fifo_size_gige;
                rx_size = dev->rx_fifo_size_gige;

                if (dev->ndev->mtu > ETH_DATA_LEN) {
                        /* Jumbo frames: enable JPSM and allow a longer
                         * graceful-stop timeout for the bigger frame */
                        if (emac_has_feature(dev, EMAC_FTR_EMAC4))
                                mr1 |= EMAC4_MR1_JPSM;
                        else
                                mr1 |= EMAC_MR1_JPSM;
                        dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
                } else
                        dev->stop_timeout = STOP_TIMEOUT_1000;
                break;
        case SPEED_100:
                mr1 |= EMAC_MR1_MF_100;
                dev->stop_timeout = STOP_TIMEOUT_100;
                break;
        default: /* make gcc happy */
                break;
        }

        if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
                rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
                                dev->phy.speed);
        if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
                zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);

        /* on 40x erratum forces us to NOT use integrated flow control,
         * let's hope it works on 44x ;)
         */
        if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
            dev->phy.duplex == DUPLEX_FULL) {
                if (dev->phy.pause)
                        mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
                else if (dev->phy.asym_pause)
                        mr1 |= EMAC_MR1_APP;
        }

        /* Add base settings & fifo sizes & program MR1 */
        mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
        out_be32(&p->mr1, mr1);

        /* Set individual MAC address */
        out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
        out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
                 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
                 ndev->dev_addr[5]);

        /* VLAN Tag Protocol ID */
        out_be32(&p->vtpid, 0x8100);

        /* Receive mode register; program the hash tables before
         * enabling multicast address filtering */
        r = emac_iff2rmr(ndev);
        if (r & EMAC_RMR_MAE)
                emac_hash_mc(dev);
        out_be32(&p->rmr, r);

        /* FIFOs thresholds */
        if (emac_has_feature(dev, EMAC_FTR_EMAC4))
                r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
                               tx_size / 2 / dev->fifo_entry_size);
        else
                r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
                              tx_size / 2 / dev->fifo_entry_size);
        out_be32(&p->tmr1, r);
        out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));

        /* PAUSE frame is sent when RX FIFO reaches its high-water mark,
           there should be still enough space in FIFO to allow the our link
           partner time to process this frame and also time to send PAUSE
           frame itself.

           Here is the worst case scenario for the RX FIFO "headroom"
           (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):

           1) One maximum-length frame on TX                    1522 bytes
           2) One PAUSE frame time                                64 bytes
           3) PAUSE frame decode time allowance                   64 bytes
           4) One maximum-length frame on RX                    1522 bytes
           5) Round-trip propagation delay of the link (100Mb)    15 bytes
           ----------
           3187 bytes

           I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
           low-water mark  to RX_FIFO_SIZE / 8 (512 bytes)
         */
        r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
                           rx_size / 4 / dev->fifo_entry_size);
        out_be32(&p->rwmr, r);

        /* Set PAUSE timer to the maximum */
        out_be32(&p->ptr, 0xffff);

        /* IRQ sources */
        r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
                EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
                EMAC_ISR_IRE | EMAC_ISR_TE;
        if (emac_has_feature(dev, EMAC_FTR_EMAC4))
            r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE |
                                                  EMAC4_ISR_RXOE | */;
        out_be32(&p->iser,  r);

        /* We need to take GPCS PHY out of isolate mode after EMAC reset */
        if (emac_phy_gpcs(dev->phy.mode))
                emac_mii_reset_phy(&dev->phy);

        /* Required for Pause packet support in EMAC */
        dev_mc_add(ndev, default_mcast_addr, sizeof(default_mcast_addr), 1);

        return 0;
}
664
665 static void emac_reinitialize(struct emac_instance *dev)
666 {
667         DBG(dev, "reinitialize" NL);
668
669         emac_netif_stop(dev);
670         if (!emac_configure(dev)) {
671                 emac_tx_enable(dev);
672                 emac_rx_enable(dev);
673         }
674         emac_netif_start(dev);
675 }
676
/* Fully reset the TX path: stop the MAC transmitter and its MAL
 * channel, reclaim every pending TX buffer, rewind the ring indices,
 * reprogram the chip and restart everything.  The order matters: the
 * MAL channel must be disabled before the ring is cleaned, and the
 * chip reconfigured before TX/RX are re-enabled.
 */
static void emac_full_tx_reset(struct emac_instance *dev)
{
        DBG(dev, "full_tx_reset" NL);

        emac_tx_disable(dev);
        mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
        emac_clean_tx_ring(dev);
        dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;

        emac_configure(dev);

        mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
        emac_tx_enable(dev);
        emac_rx_enable(dev);
}
692
693 static void emac_reset_work(struct work_struct *work)
694 {
695         struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);
696
697         DBG(dev, "reset_work" NL);
698
699         mutex_lock(&dev->link_lock);
700         if (dev->opened) {
701                 emac_netif_stop(dev);
702                 emac_full_tx_reset(dev);
703                 emac_netif_start(dev);
704         }
705         mutex_unlock(&dev->link_lock);
706 }
707
708 static void emac_tx_timeout(struct net_device *ndev)
709 {
710         struct emac_instance *dev = netdev_priv(ndev);
711
712         DBG(dev, "tx_timeout" NL);
713
714         schedule_work(&dev->reset_work);
715 }
716
717
718 static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
719 {
720         int done = !!(stacr & EMAC_STACR_OC);
721
722         if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
723                 done = !done;
724
725         return done;
726 };
727
/* Read a PHY register over the MDIO bus via STACR.  Serializes on
 * mdio_lock, claims the ZMII/RGMII MDIO lines, waits for the interface
 * to go idle (20us budget), issues the read and waits up to 200us for
 * completion.  Returns the 16-bit register value on success,
 * -ETIMEDOUT if the interface never responds, or -EREMOTEIO if the
 * PHY reports an error (STACR[PHYE]).
 */
static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
{
        struct emac_regs __iomem *p = dev->emacp;
        u32 r = 0;
        int n, err = -ETIMEDOUT;

        mutex_lock(&dev->mdio_lock);

        DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);

        /* Enable proper MDIO port */
        if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
                zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
        if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
                rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

        /* Wait for management interface to become idle */
        n = 20;
        while (!emac_phy_done(dev, in_be32(&p->stacr))) {
                udelay(1);
                if (!--n) {
                        DBG2(dev, " -> timeout wait idle\n");
                        goto bail;
                }
        }

        /* Issue read command: base value depends on core type, OC may
         * need pre-setting on inverted-polarity cores, and the command
         * encoding differs between old and new STACR layouts */
        if (emac_has_feature(dev, EMAC_FTR_EMAC4))
                r = EMAC4_STACR_BASE(dev->opb_bus_freq);
        else
                r = EMAC_STACR_BASE(dev->opb_bus_freq);
        if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
                r |= EMAC_STACR_OC;
        if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
                r |= EMACX_STACR_STAC_READ;
        else
                r |= EMAC_STACR_STAC_READ;
        r |= (reg & EMAC_STACR_PRA_MASK)
                | ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
        out_be32(&p->stacr, r);

        /* Wait for read to complete */
        n = 200;
        while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
                udelay(1);
                if (!--n) {
                        DBG2(dev, " -> timeout wait complete\n");
                        goto bail;
                }
        }

        if (unlikely(r & EMAC_STACR_PHYE)) {
                DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
                err = -EREMOTEIO;
                goto bail;
        }

        /* Extract the 16-bit data field from the final STACR value */
        r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);

        DBG2(dev, "mdio_read -> %04x" NL, r);
        err = 0;
 bail:
        /* Release MDIO lines in reverse acquisition order */
        if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
                rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
        if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
                zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
        mutex_unlock(&dev->mdio_lock);

        return err == 0 ? r : err;
}
798
/* Write a PHY register over the MDIO bus via STACR.  Same protocol as
 * __emac_mdio_read (lock, claim MDIO lines, wait idle, issue command,
 * wait up to 200us for completion) but with the data field included in
 * the command word.  Failures are silent apart from debug output --
 * the function returns void.
 */
static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
                              u16 val)
{
        struct emac_regs __iomem *p = dev->emacp;
        u32 r = 0;
        int n, err = -ETIMEDOUT;

        mutex_lock(&dev->mdio_lock);

        DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);

        /* Enable proper MDIO port */
        if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
                zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
        if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
                rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

        /* Wait for management interface to be idle */
        n = 20;
        while (!emac_phy_done(dev, in_be32(&p->stacr))) {
                udelay(1);
                if (!--n) {
                        DBG2(dev, " -> timeout wait idle\n");
                        goto bail;
                }
        }

        /* Issue write command: base value depends on core type, OC may
         * need pre-setting on inverted-polarity cores, and the command
         * encoding differs between old and new STACR layouts */
        if (emac_has_feature(dev, EMAC_FTR_EMAC4))
                r = EMAC4_STACR_BASE(dev->opb_bus_freq);
        else
                r = EMAC_STACR_BASE(dev->opb_bus_freq);
        if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
                r |= EMAC_STACR_OC;
        if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
                r |= EMACX_STACR_STAC_WRITE;
        else
                r |= EMAC_STACR_STAC_WRITE;
        r |= (reg & EMAC_STACR_PRA_MASK) |
                ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
                (val << EMAC_STACR_PHYD_SHIFT);
        out_be32(&p->stacr, r);

        /* Wait for write to complete */
        n = 200;
        while (!emac_phy_done(dev, in_be32(&p->stacr))) {
                udelay(1);
                if (!--n) {
                        DBG2(dev, " -> timeout wait complete\n");
                        goto bail;
                }
        }
        err = 0;
 bail:
        /* Release MDIO lines in reverse acquisition order */
        if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
                rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
        if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
                zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
        mutex_unlock(&dev->mdio_lock);
}
859
860 static int emac_mdio_read(struct net_device *ndev, int id, int reg)
861 {
862         struct emac_instance *dev = netdev_priv(ndev);
863         int res;
864
865         res = __emac_mdio_read(dev->mdio_instance ? dev->mdio_instance : dev,
866                                (u8) id, (u8) reg);
867         return res;
868 }
869
870 static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
871 {
872         struct emac_instance *dev = netdev_priv(ndev);
873
874         __emac_mdio_write(dev->mdio_instance ? dev->mdio_instance : dev,
875                           (u8) id, (u8) reg, (u16) val);
876 }
877
/* Tx lock BH
 *
 * Reprogram the RX mode register (and the multicast hash when multicast
 * matching is enabled) from the netdev's current flags/address lists.
 * Only the RX channel is stopped around the update -- see the rationale
 * below for why a full EMAC reset is deliberately avoided.
 */
static void __emac_set_multicast_list(struct emac_instance *dev)
{
        struct emac_regs __iomem *p = dev->emacp;
        u32 rmr = emac_iff2rmr(dev->ndev);

        DBG(dev, "__multicast %08x" NL, rmr);

        /* I decided to relax register access rules here to avoid
         * full EMAC reset.
         *
         * There is a real problem with EMAC4 core if we use MWSW_001 bit
         * in MR1 register and do a full EMAC reset.
         * One TX BD status update is delayed and, after EMAC reset, it
         * never happens, resulting in TX hung (it'll be recovered by TX
         * timeout handler eventually, but this is just gross).
         * So we either have to do full TX reset or try to cheat here :)
         *
         * The only required change is to RX mode register, so I *think* all
         * we need is just to stop RX channel. This seems to work on all
         * tested SoCs.                                                --ebs
         *
         * If we need the full reset, we might just trigger the workqueue
         * and do it async... a bit nasty but should work --BenH
         */
        dev->mcast_pending = 0;
        emac_rx_disable(dev);
        if (rmr & EMAC_RMR_MAE)
                emac_hash_mc(dev);
        out_be32(&p->rmr, rmr);
        emac_rx_enable(dev);
}
910
911 /* Tx lock BH */
912 static void emac_set_multicast_list(struct net_device *ndev)
913 {
914         struct emac_instance *dev = netdev_priv(ndev);
915
916         DBG(dev, "multicast" NL);
917
918         BUG_ON(!netif_running(dev->ndev));
919
920         if (dev->no_mcast) {
921                 dev->mcast_pending = 1;
922                 return;
923         }
924         __emac_set_multicast_list(dev);
925 }
926
/*
 * Rebuild the RX ring for @new_mtu with the interface quiesced.
 *
 * Runs under dev->link_lock with the netif and RX channel stopped.
 * Works in two passes: first mark every BD empty (dropping any packets
 * still sitting in the ring), then -- only if bigger buffers are needed --
 * replace the skbs.  On allocation failure the ring is left partially
 * updated but every BD was already marked empty by the first pass, so
 * restarting RX at the "oom" label is still safe; -ENOMEM is returned.
 */
static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
{
        int rx_sync_size = emac_rx_sync_size(new_mtu);
        int rx_skb_size = emac_rx_skb_size(new_mtu);
        int i, ret = 0;

        mutex_lock(&dev->link_lock);
        emac_netif_stop(dev);
        emac_rx_disable(dev);
        mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);

        /* Throw away any partially reassembled SG packet */
        if (dev->rx_sg_skb) {
                ++dev->estats.rx_dropped_resize;
                dev_kfree_skb(dev->rx_sg_skb);
                dev->rx_sg_skb = NULL;
        }

        /* Make a first pass over RX ring and mark BDs ready, dropping
         * non-processed packets on the way. We need this as a separate pass
         * to simplify error recovery in the case of allocation failure later.
         */
        for (i = 0; i < NUM_RX_BUFF; ++i) {
                if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
                        ++dev->estats.rx_dropped_resize;

                dev->rx_desc[i].data_len = 0;
                dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
                    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
        }

        /* Reallocate RX ring only if bigger skb buffers are required */
        if (rx_skb_size <= dev->rx_skb_size)
                goto skip;

        /* Second pass, allocate new skbs */
        for (i = 0; i < NUM_RX_BUFF; ++i) {
                struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
                if (!skb) {
                        ret = -ENOMEM;
                        goto oom;
                }

                BUG_ON(!dev->rx_skb[i]);
                dev_kfree_skb(dev->rx_skb[i]);

                /* The +/-2 offsets mirror emac_alloc_rx_skb(): the device
                 * DMAs starting 2 bytes before skb->data, presumably to
                 * keep the IP header word-aligned -- TODO confirm */
                skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
                dev->rx_desc[i].data_ptr =
                    dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
                                   DMA_FROM_DEVICE) + 2;
                dev->rx_skb[i] = skb;
        }
 skip:
        /* Check if we need to change "Jumbo" bit in MR1 */
        if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
                /* This is to prevent starting RX channel in emac_rx_enable() */
                set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);

                dev->ndev->mtu = new_mtu;
                emac_full_tx_reset(dev);
        }

        mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
 oom:
        /* Restart RX */
        clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
        dev->rx_slot = 0;
        mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
        emac_rx_enable(dev);
        emac_netif_start(dev);
        mutex_unlock(&dev->link_lock);

        return ret;
}
1000
1001 /* Process ctx, rtnl_lock semaphore */
1002 static int emac_change_mtu(struct net_device *ndev, int new_mtu)
1003 {
1004         struct emac_instance *dev = netdev_priv(ndev);
1005         int ret = 0;
1006
1007         if (new_mtu < EMAC_MIN_MTU || new_mtu > dev->max_mtu)
1008                 return -EINVAL;
1009
1010         DBG(dev, "change_mtu(%d)" NL, new_mtu);
1011
1012         if (netif_running(ndev)) {
1013                 /* Check if we really need to reinitalize RX ring */
1014                 if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
1015                         ret = emac_resize_rx_ring(dev, new_mtu);
1016         }
1017
1018         if (!ret) {
1019                 ndev->mtu = new_mtu;
1020                 dev->rx_skb_size = emac_rx_skb_size(new_mtu);
1021                 dev->rx_sync_size = emac_rx_sync_size(new_mtu);
1022         }
1023
1024         return ret;
1025 }
1026
1027 static void emac_clean_tx_ring(struct emac_instance *dev)
1028 {
1029         int i;
1030
1031         for (i = 0; i < NUM_TX_BUFF; ++i) {
1032                 if (dev->tx_skb[i]) {
1033                         dev_kfree_skb(dev->tx_skb[i]);
1034                         dev->tx_skb[i] = NULL;
1035                         if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
1036                                 ++dev->estats.tx_dropped;
1037                 }
1038                 dev->tx_desc[i].ctrl = 0;
1039                 dev->tx_desc[i].data_ptr = 0;
1040         }
1041 }
1042
1043 static void emac_clean_rx_ring(struct emac_instance *dev)
1044 {
1045         int i;
1046
1047         for (i = 0; i < NUM_RX_BUFF; ++i)
1048                 if (dev->rx_skb[i]) {
1049                         dev->rx_desc[i].ctrl = 0;
1050                         dev_kfree_skb(dev->rx_skb[i]);
1051                         dev->rx_skb[i] = NULL;
1052                         dev->rx_desc[i].data_ptr = 0;
1053                 }
1054
1055         if (dev->rx_sg_skb) {
1056                 dev_kfree_skb(dev->rx_sg_skb);
1057                 dev->rx_sg_skb = NULL;
1058         }
1059 }
1060
/*
 * Allocate and DMA-map a fresh receive skb for RX ring @slot.
 *
 * Returns 0 on success, -ENOMEM if allocation fails (slot untouched).
 * The device DMAs starting 2 bytes before skb->data (hence the -2/+2
 * around the mapping) -- presumably to keep the IP header word-aligned
 * after the 14-byte Ethernet header; TODO confirm.  The wmb() orders the
 * data_ptr/data_len stores before the ctrl store that marks the BD EMPTY
 * and hence hands it to the MAL.
 */
static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
                                    gfp_t flags)
{
        struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
        if (unlikely(!skb))
                return -ENOMEM;

        dev->rx_skb[slot] = skb;
        dev->rx_desc[slot].data_len = 0;

        skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
        dev->rx_desc[slot].data_ptr =
            dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
                           DMA_FROM_DEVICE) + 2;
        wmb();
        /* Hand the BD to the hardware; last slot also carries the WRAP bit */
        dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
            (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

        return 0;
}
1081
1082 static void emac_print_link_status(struct emac_instance *dev)
1083 {
1084         if (netif_carrier_ok(dev->ndev))
1085                 printk(KERN_INFO "%s: link is up, %d %s%s\n",
1086                        dev->ndev->name, dev->phy.speed,
1087                        dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
1088                        dev->phy.pause ? ", pause enabled" :
1089                        dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
1090         else
1091                 printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
1092 }
1093
/* Process ctx, rtnl_lock semaphore
 *
 * ndo open callback: request the error IRQ, populate the RX ring, start
 * PHY link polling (or force carrier on when there is no PHY), then bring
 * up the MAL channels and the EMAC itself under link_lock.  On RX ring
 * allocation failure, emac_clean_rx_ring() releases the partially filled
 * ring (it tolerates NULL slots) and the IRQ is freed again.
 */
static int emac_open(struct net_device *ndev)
{
        struct emac_instance *dev = netdev_priv(ndev);
        int err, i;

        DBG(dev, "open" NL);

        /* Setup error IRQ handler */
        err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
        if (err) {
                printk(KERN_ERR "%s: failed to request IRQ %d\n",
                       ndev->name, dev->emac_irq);
                return err;
        }

        /* Allocate RX ring */
        for (i = 0; i < NUM_RX_BUFF; ++i)
                if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
                        printk(KERN_ERR "%s: failed to allocate RX ring\n",
                               ndev->name);
                        goto oom;
                }

        /* Reset ring bookkeeping before enabling anything */
        dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
        clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
        dev->rx_sg_skb = NULL;

        mutex_lock(&dev->link_lock);
        dev->opened = 1;

        /* Start PHY polling now.
         */
        if (dev->phy.address >= 0) {
                int link_poll_interval;
                if (dev->phy.def->ops->poll_link(&dev->phy)) {
                        dev->phy.def->ops->read_link(&dev->phy);
                        emac_rx_clk_default(dev);
                        netif_carrier_on(dev->ndev);
                        link_poll_interval = PHY_POLL_LINK_ON;
                } else {
                        emac_rx_clk_tx(dev);
                        netif_carrier_off(dev->ndev);
                        link_poll_interval = PHY_POLL_LINK_OFF;
                }
                /* wmb() orders link_polling=1 before the work is queued;
                 * pairs with the smp_rmb() in emac_force_link_update() */
                dev->link_polling = 1;
                wmb();
                schedule_delayed_work(&dev->link_work, link_poll_interval);
                emac_print_link_status(dev);
        } else
                netif_carrier_on(dev->ndev);

        emac_configure(dev);
        mal_poll_add(dev->mal, &dev->commac);
        mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
        mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
        mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
        emac_tx_enable(dev);
        emac_rx_enable(dev);
        emac_netif_start(dev);

        mutex_unlock(&dev->link_lock);

        return 0;
 oom:
        emac_clean_rx_ring(dev);
        free_irq(dev->emac_irq, dev);

        return -ENOMEM;
}
1164
/* BHs disabled */
#if 0
/* NOTE(review): dead code -- compiled out and unreferenced; kept for
 * reference.  Decodes MR1 back into speed/duplex/pause settings and
 * reports whether they differ from the cached PHY state. */
static int emac_link_differs(struct emac_instance *dev)
{
        u32 r = in_be32(&dev->emacp->mr1);

        int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
        int speed, pause, asym_pause;

        if (r & EMAC_MR1_MF_1000)
                speed = SPEED_1000;
        else if (r & EMAC_MR1_MF_100)
                speed = SPEED_100;
        else
                speed = SPEED_10;

        switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
        case (EMAC_MR1_EIFC | EMAC_MR1_APP):
                pause = 1;
                asym_pause = 0;
                break;
        case EMAC_MR1_APP:
                pause = 0;
                asym_pause = 1;
                break;
        default:
                pause = asym_pause = 0;
        }
        return speed != dev->phy.speed || duplex != dev->phy.duplex ||
            pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
}
#endif
1197
/*
 * Periodic link poll (delayed work, runs under dev->link_lock).
 *
 * On a down->up transition: re-read the PHY parameters, raise carrier and
 * perform a full TX reset to apply the new link settings.  On an up->down
 * transition: drop carrier, stop TX and reinitialize the EMAC.  Reschedules
 * itself with a shorter interval while the link is down, unless the device
 * has been closed (dev->opened cleared) in the meantime.
 */
static void emac_link_timer(struct work_struct *work)
{
        struct emac_instance *dev =
                container_of((struct delayed_work *)work,
                             struct emac_instance, link_work);
        int link_poll_interval;

        mutex_lock(&dev->link_lock);
        DBG2(dev, "link timer" NL);

        /* Device was closed while this work was pending: don't reschedule */
        if (!dev->opened)
                goto bail;

        if (dev->phy.def->ops->poll_link(&dev->phy)) {
                if (!netif_carrier_ok(dev->ndev)) {
                        emac_rx_clk_default(dev);
                        /* Get new link parameters */
                        dev->phy.def->ops->read_link(&dev->phy);

                        netif_carrier_on(dev->ndev);
                        emac_netif_stop(dev);
                        emac_full_tx_reset(dev);
                        emac_netif_start(dev);
                        emac_print_link_status(dev);
                }
                link_poll_interval = PHY_POLL_LINK_ON;
        } else {
                if (netif_carrier_ok(dev->ndev)) {
                        emac_rx_clk_tx(dev);
                        netif_carrier_off(dev->ndev);
                        netif_tx_disable(dev->ndev);
                        emac_reinitialize(dev);
                        emac_print_link_status(dev);
                }
                link_poll_interval = PHY_POLL_LINK_OFF;
        }
        schedule_delayed_work(&dev->link_work, link_poll_interval);
 bail:
        mutex_unlock(&dev->link_lock);
}
1238
/*
 * Force an immediate link re-evaluation by dropping carrier and kicking
 * the link-poll work.  The smp_rmb() pairs with the wmb() after
 * link_polling is set in emac_open(); link_polling is re-checked after
 * the cancel because emac_close() may clear it concurrently, in which
 * case the work must not be requeued.
 */
static void emac_force_link_update(struct emac_instance *dev)
{
        netif_carrier_off(dev->ndev);
        smp_rmb();
        if (dev->link_polling) {
                cancel_rearming_delayed_work(&dev->link_work);
                if (dev->link_polling)
                        schedule_delayed_work(&dev->link_work,  PHY_POLL_LINK_OFF);
        }
}
1249
/* Process ctx, rtnl_lock semaphore
 *
 * ndo stop callback: stop link polling first (so the work can't requeue
 * itself), mark the device closed under link_lock, then tear down in
 * reverse bring-up order -- EMAC RX/TX, MAL channels, NAPI registration,
 * ring contents and finally the IRQ.
 */
static int emac_close(struct net_device *ndev)
{
        struct emac_instance *dev = netdev_priv(ndev);

        DBG(dev, "close" NL);

        if (dev->phy.address >= 0) {
                dev->link_polling = 0;
                cancel_rearming_delayed_work(&dev->link_work);
        }
        mutex_lock(&dev->link_lock);
        emac_netif_stop(dev);
        dev->opened = 0;
        mutex_unlock(&dev->link_lock);

        emac_rx_disable(dev);
        emac_tx_disable(dev);
        mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
        mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
        mal_poll_del(dev->mal, &dev->commac);

        emac_clean_tx_ring(dev);
        emac_clean_rx_ring(dev);

        free_irq(dev->emac_irq, dev);

        return 0;
}
1279
1280 static inline u16 emac_tx_csum(struct emac_instance *dev,
1281                                struct sk_buff *skb)
1282 {
1283         if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
1284                 (skb->ip_summed == CHECKSUM_PARTIAL)) {
1285                 ++dev->stats.tx_packets_csum;
1286                 return EMAC_TX_CTRL_TAH_CSUM;
1287         }
1288         return 0;
1289 }
1290
1291 static inline int emac_xmit_finish(struct emac_instance *dev, int len)
1292 {
1293         struct emac_regs __iomem *p = dev->emacp;
1294         struct net_device *ndev = dev->ndev;
1295
1296         /* Send the packet out. If the if makes a significant perf
1297          * difference, then we can store the TMR0 value in "dev"
1298          * instead
1299          */
1300         if (emac_has_feature(dev, EMAC_FTR_EMAC4))
1301                 out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
1302         else
1303                 out_be32(&p->tmr0, EMAC_TMR0_XMIT);
1304
1305         if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
1306                 netif_stop_queue(ndev);
1307                 DBG2(dev, "stopped TX queue" NL);
1308         }
1309
1310         ndev->trans_start = jiffies;
1311         ++dev->stats.tx_packets;
1312         dev->stats.tx_bytes += len;
1313
1314         return 0;
1315 }
1316
/* Tx lock BH
 *
 * Single-descriptor transmit path: map the linear skb data and hand one
 * BD to the MAL.  The wmb() orders the data_ptr/data_len stores before
 * the ctrl store that sets MAL_TX_CTRL_READY and makes the descriptor
 * visible to the hardware.
 */
static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
        struct emac_instance *dev = netdev_priv(ndev);
        unsigned int len = skb->len;
        int slot;

        u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
            MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);

        /* Claim the next slot; the last ring slot also carries WRAP */
        slot = dev->tx_slot++;
        if (dev->tx_slot == NUM_TX_BUFF) {
                dev->tx_slot = 0;
                ctrl |= MAL_TX_CTRL_WRAP;
        }

        DBG2(dev, "xmit(%u) %d" NL, len, slot);

        dev->tx_skb[slot] = skb;
        dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
                                                     skb->data, len,
                                                     DMA_TO_DEVICE);
        dev->tx_desc[slot].data_len = (u16) len;
        wmb();
        dev->tx_desc[slot].ctrl = ctrl;

        return emac_xmit_finish(dev, len);
}
1345
/*
 * Split a DMA region (@pd, @len) into MAL_MAX_TX_SIZE-sized descriptors,
 * starting at the slot after @slot.  @last indicates this region ends the
 * frame, so its final chunk gets MAL_TX_CTRL_LAST.  Returns the slot of
 * the final descriptor written.
 *
 * These descriptors are made READY immediately (base_ctrl carries the
 * READY bit from the caller), but the frame only becomes visible to the
 * hardware once emac_start_xmit_sg() sets the FIRST descriptor's ctrl
 * after a wmb().  The caller guarantees enough free slots.
 */
static inline int emac_xmit_split(struct emac_instance *dev, int slot,
                                  u32 pd, int len, int last, u16 base_ctrl)
{
        while (1) {
                u16 ctrl = base_ctrl;
                int chunk = min(len, MAL_MAX_TX_SIZE);
                len -= chunk;

                slot = (slot + 1) % NUM_TX_BUFF;

                if (last && !len)
                        ctrl |= MAL_TX_CTRL_LAST;
                if (slot == NUM_TX_BUFF - 1)
                        ctrl |= MAL_TX_CTRL_WRAP;

                dev->tx_skb[slot] = NULL;
                dev->tx_desc[slot].data_ptr = pd;
                dev->tx_desc[slot].data_len = (u16) chunk;
                dev->tx_desc[slot].ctrl = ctrl;
                ++dev->tx_cnt;

                if (!len)
                        break;

                pd += chunk;
        }
        return slot;
}
1374
/* Tx lock BH disabled (SG version for TAH equipped EMACs)
 *
 * Scatter/gather transmit: small linear skbs take the fast path through
 * emac_start_xmit(); otherwise the linear part and each page fragment are
 * mapped and chunked into descriptors via emac_xmit_split().  The frame
 * is armed last: only after all descriptors are filled and a wmb() does
 * the FIRST descriptor's ctrl get written, handing the whole frame to the
 * hardware at once.  If the per-fragment slot check fails mid-frame, the
 * already-written descriptors are rolled back (undo_frame) and the queue
 * is stopped; returning 1 tells the stack to requeue the skb.
 */
static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{
        struct emac_instance *dev = netdev_priv(ndev);
        int nr_frags = skb_shinfo(skb)->nr_frags;
        int len = skb->len, chunk;
        int slot, i;
        u16 ctrl;
        u32 pd;

        /* This is common "fast" path */
        if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
                return emac_start_xmit(skb, ndev);

        len -= skb->data_len;

        /* Note, this is only an *estimation*, we can still run out of empty
         * slots because of the additional fragmentation into
         * MAL_MAX_TX_SIZE-sized chunks
         */
        if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
                goto stop_queue;

        ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
            emac_tx_csum(dev, skb);
        slot = dev->tx_slot;

        /* skb data */
        dev->tx_skb[slot] = NULL;
        chunk = min(len, MAL_MAX_TX_SIZE);
        dev->tx_desc[slot].data_ptr = pd =
            dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
        dev->tx_desc[slot].data_len = (u16) chunk;
        len -= chunk;
        if (unlikely(len))
                slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
                                       ctrl);
        /* skb fragments */
        for (i = 0; i < nr_frags; ++i) {
                struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
                len = frag->size;

                /* Re-check per fragment -- the initial check was only an
                 * estimate and can be exceeded by chunking */
                if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
                        goto undo_frame;

                pd = dma_map_page(&dev->ofdev->dev, frag->page, frag->page_offset, len,
                                  DMA_TO_DEVICE);

                slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
                                       ctrl);
        }

        DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);

        /* Attach skb to the last slot so we don't release it too early */
        dev->tx_skb[slot] = skb;

        /* Send the packet out */
        if (dev->tx_slot == NUM_TX_BUFF - 1)
                ctrl |= MAL_TX_CTRL_WRAP;
        wmb();
        dev->tx_desc[dev->tx_slot].ctrl = ctrl;
        dev->tx_slot = (slot + 1) % NUM_TX_BUFF;

        return emac_xmit_finish(dev, skb->len);

 undo_frame:
        /* Well, too bad. Our previous estimation was overly optimistic.
         * Undo everything.
         */
        while (slot != dev->tx_slot) {
                dev->tx_desc[slot].ctrl = 0;
                --dev->tx_cnt;
                if (--slot < 0)
                        slot = NUM_TX_BUFF - 1;
        }
        ++dev->estats.tx_undo;

 stop_queue:
        netif_stop_queue(ndev);
        DBG2(dev, "stopped TX queue" NL);
        return 1;
}
1458
1459 /* Tx lock BHs */
1460 static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
1461 {
1462         struct emac_error_stats *st = &dev->estats;
1463
1464         DBG(dev, "BD TX error %04x" NL, ctrl);
1465
1466         ++st->tx_bd_errors;
1467         if (ctrl & EMAC_TX_ST_BFCS)
1468                 ++st->tx_bd_bad_fcs;
1469         if (ctrl & EMAC_TX_ST_LCS)
1470                 ++st->tx_bd_carrier_loss;
1471         if (ctrl & EMAC_TX_ST_ED)
1472                 ++st->tx_bd_excessive_deferral;
1473         if (ctrl & EMAC_TX_ST_EC)
1474                 ++st->tx_bd_excessive_collisions;
1475         if (ctrl & EMAC_TX_ST_LC)
1476                 ++st->tx_bd_late_collision;
1477         if (ctrl & EMAC_TX_ST_MC)
1478                 ++st->tx_bd_multple_collisions;
1479         if (ctrl & EMAC_TX_ST_SC)
1480                 ++st->tx_bd_single_collision;
1481         if (ctrl & EMAC_TX_ST_UR)
1482                 ++st->tx_bd_underrun;
1483         if (ctrl & EMAC_TX_ST_SQE)
1484                 ++st->tx_bd_sqe;
1485 }
1486
/*
 * Reclaim completed TX descriptors (runs with the TX lock held, BHs off).
 *
 * Walks forward from ack_slot while descriptors are no longer READY,
 * freeing the attached skb (only the last descriptor of an SG frame has
 * one), accounting BD errors, and decrementing tx_cnt.  The walk stops at
 * the first still-READY descriptor or when tx_cnt hits zero.  If anything
 * was reclaimed and the queue was stopped, it is woken once occupancy
 * drops below EMAC_TX_WAKEUP_THRESH.
 */
static void emac_poll_tx(void *param)
{
        struct emac_instance *dev = param;
        u32 bad_mask;

        DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);

        /* TAH-equipped EMACs report a different set of error bits */
        if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
                bad_mask = EMAC_IS_BAD_TX_TAH;
        else
                bad_mask = EMAC_IS_BAD_TX;

        netif_tx_lock_bh(dev->ndev);
        if (dev->tx_cnt) {
                u16 ctrl;
                int slot = dev->ack_slot, n = 0;
        again:
                ctrl = dev->tx_desc[slot].ctrl;
                if (!(ctrl & MAL_TX_CTRL_READY)) {
                        struct sk_buff *skb = dev->tx_skb[slot];
                        ++n;

                        if (skb) {
                                dev_kfree_skb(skb);
                                dev->tx_skb[slot] = NULL;
                        }
                        slot = (slot + 1) % NUM_TX_BUFF;

                        if (unlikely(ctrl & bad_mask))
                                emac_parse_tx_error(dev, ctrl);

                        if (--dev->tx_cnt)
                                goto again;
                }
                if (n) {
                        dev->ack_slot = slot;
                        if (netif_queue_stopped(dev->ndev) &&
                            dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
                                netif_wake_queue(dev->ndev);

                        DBG2(dev, "tx %d pkts" NL, n);
                }
        }
        netif_tx_unlock_bh(dev->ndev);
}
1532
/*
 * Return the skb in RX ring @slot to the hardware without reallocating.
 *
 * When @len is non-zero, the first @len+2 bytes are re-mapped for device
 * access; per the note at the top of this file, the matching dma_unmap is
 * intentionally omitted by design.  The wmb() orders the data_len store
 * before the ctrl store that marks the BD EMPTY and hands it back to the
 * MAL.
 */
static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
                                       int len)
{
        struct sk_buff *skb = dev->rx_skb[slot];

        DBG2(dev, "recycle %d %d" NL, slot, len);

        if (len)
                dma_map_single(&dev->ofdev->dev, skb->data - 2,
                               EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);

        dev->rx_desc[slot].data_len = 0;
        wmb();
        dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
            (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
}
1549
1550 static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
1551 {
1552         struct emac_error_stats *st = &dev->estats;
1553
1554         DBG(dev, "BD RX error %04x" NL, ctrl);
1555
1556         ++st->rx_bd_errors;
1557         if (ctrl & EMAC_RX_ST_OE)
1558                 ++st->rx_bd_overrun;
1559         if (ctrl & EMAC_RX_ST_BP)
1560                 ++st->rx_bd_bad_packet;
1561         if (ctrl & EMAC_RX_ST_RP)
1562                 ++st->rx_bd_runt_packet;
1563         if (ctrl & EMAC_RX_ST_SE)
1564                 ++st->rx_bd_short_event;
1565         if (ctrl & EMAC_RX_ST_AE)
1566                 ++st->rx_bd_alignment_error;
1567         if (ctrl & EMAC_RX_ST_BFCS)
1568                 ++st->rx_bd_bad_fcs;
1569         if (ctrl & EMAC_RX_ST_PTL)
1570                 ++st->rx_bd_packet_too_long;
1571         if (ctrl & EMAC_RX_ST_ORE)
1572                 ++st->rx_bd_out_of_range;
1573         if (ctrl & EMAC_RX_ST_IRE)
1574                 ++st->rx_bd_in_range;
1575 }
1576
1577 static inline void emac_rx_csum(struct emac_instance *dev,
1578                                 struct sk_buff *skb, u16 ctrl)
1579 {
1580 #ifdef CONFIG_IBM_NEW_EMAC_TAH
1581         if (!ctrl && dev->tah_dev) {
1582                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1583                 ++dev->stats.rx_packets_csum;
1584         }
1585 #endif
1586 }
1587
/*
 * Append the data of RX BD @slot to the in-progress scatter/gather packet
 * (dev->rx_sg_skb).  Returns 0 if the data was appended, -1 if there was
 * no SG packet in progress or the packet grew past rx_skb_size (in which
 * case it is dropped and counted under rx_dropped_mtu).  In every case
 * the slot's skb is recycled back to the hardware.
 */
static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
{
        if (likely(dev->rx_sg_skb != NULL)) {
                int len = dev->rx_desc[slot].data_len;
                int tot_len = dev->rx_sg_skb->len + len;

                /* +2 accounts for the alignment offset used when mapping */
                if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
                        ++dev->estats.rx_dropped_mtu;
                        dev_kfree_skb(dev->rx_sg_skb);
                        dev->rx_sg_skb = NULL;
                } else {
                        cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb),
                                         dev->rx_skb[slot]->data, len);
                        skb_put(dev->rx_sg_skb, len);
                        emac_recycle_rx_skb(dev, slot, len);
                        return 0;
                }
        }
        emac_recycle_rx_skb(dev, slot, 0);
        return -1;
}
1609
1610 /* NAPI poll context */
1611 static int emac_poll_rx(void *param, int budget)
1612 {
1613         struct emac_instance *dev = param;
1614         int slot = dev->rx_slot, received = 0;
1615
1616         DBG2(dev, "poll_rx(%d)" NL, budget);
1617
1618  again:
1619         while (budget > 0) {
1620                 int len;
1621                 struct sk_buff *skb;
1622                 u16 ctrl = dev->rx_desc[slot].ctrl;
1623
1624                 if (ctrl & MAL_RX_CTRL_EMPTY)
1625                         break;
1626
1627                 skb = dev->rx_skb[slot];
1628                 mb();
1629                 len = dev->rx_desc[slot].data_len;
1630
1631                 if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
1632                         goto sg;
1633
1634                 ctrl &= EMAC_BAD_RX_MASK;
1635                 if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1636                         emac_parse_rx_error(dev, ctrl);
1637                         ++dev->estats.rx_dropped_error;
1638                         emac_recycle_rx_skb(dev, slot, 0);
1639                         len = 0;
1640                         goto next;
1641                 }
1642
1643                 if (len < ETH_HLEN) {
1644                         ++dev->estats.rx_dropped_stack;
1645                         emac_recycle_rx_skb(dev, slot, len);
1646                         goto next;
1647                 }
1648
1649                 if (len && len < EMAC_RX_COPY_THRESH) {
1650                         struct sk_buff *copy_skb =
1651                             alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
1652                         if (unlikely(!copy_skb))
1653                                 goto oom;
1654
1655                         skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
1656                         cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
1657                                          len + 2);
1658                         emac_recycle_rx_skb(dev, slot, len);
1659                         skb = copy_skb;
1660                 } else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
1661                         goto oom;
1662
1663                 skb_put(skb, len);
1664         push_packet:
1665                 skb->dev = dev->ndev;
1666                 skb->protocol = eth_type_trans(skb, dev->ndev);
1667                 emac_rx_csum(dev, skb, ctrl);
1668
1669                 if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
1670                         ++dev->estats.rx_dropped_stack;
1671         next:
1672                 ++dev->stats.rx_packets;
1673         skip:
1674                 dev->stats.rx_bytes += len;
1675                 slot = (slot + 1) % NUM_RX_BUFF;
1676                 --budget;
1677                 ++received;
1678                 continue;
1679         sg:
1680                 if (ctrl & MAL_RX_CTRL_FIRST) {
1681                         BUG_ON(dev->rx_sg_skb);
1682                         if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
1683                                 DBG(dev, "rx OOM %d" NL, slot);
1684                                 ++dev->estats.rx_dropped_oom;
1685                                 emac_recycle_rx_skb(dev, slot, 0);
1686                         } else {
1687                                 dev->rx_sg_skb = skb;
1688                                 skb_put(skb, len);
1689                         }
1690                 } else if (!emac_rx_sg_append(dev, slot) &&
1691                            (ctrl & MAL_RX_CTRL_LAST)) {
1692
1693                         skb = dev->rx_sg_skb;
1694                         dev->rx_sg_skb = NULL;
1695
1696                         ctrl &= EMAC_BAD_RX_MASK;
1697                         if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1698                                 emac_parse_rx_error(dev, ctrl);
1699                                 ++dev->estats.rx_dropped_error;
1700                                 dev_kfree_skb(skb);
1701                                 len = 0;
1702                         } else
1703                                 goto push_packet;
1704                 }
1705                 goto skip;
1706         oom:
1707                 DBG(dev, "rx OOM %d" NL, slot);
1708                 /* Drop the packet and recycle skb */
1709                 ++dev->estats.rx_dropped_oom;
1710                 emac_recycle_rx_skb(dev, slot, 0);
1711                 goto next;
1712         }
1713
1714         if (received) {
1715                 DBG2(dev, "rx %d BDs" NL, received);
1716                 dev->rx_slot = slot;
1717         }
1718
1719         if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
1720                 mb();
1721                 if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
1722                         DBG2(dev, "rx restart" NL);
1723                         received = 0;
1724                         goto again;
1725                 }
1726
1727                 if (dev->rx_sg_skb) {
1728                         DBG2(dev, "dropping partial rx packet" NL);
1729                         ++dev->estats.rx_dropped_error;
1730                         dev_kfree_skb(dev->rx_sg_skb);
1731                         dev->rx_sg_skb = NULL;
1732                 }
1733
1734                 clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1735                 mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
1736                 emac_rx_enable(dev);
1737                 dev->rx_slot = 0;
1738         }
1739         return received;
1740 }
1741
1742 /* NAPI poll context */
1743 static int emac_peek_rx(void *param)
1744 {
1745         struct emac_instance *dev = param;
1746
1747         return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
1748 }
1749
1750 /* NAPI poll context */
1751 static int emac_peek_rx_sg(void *param)
1752 {
1753         struct emac_instance *dev = param;
1754
1755         int slot = dev->rx_slot;
1756         while (1) {
1757                 u16 ctrl = dev->rx_desc[slot].ctrl;
1758                 if (ctrl & MAL_RX_CTRL_EMPTY)
1759                         return 0;
1760                 else if (ctrl & MAL_RX_CTRL_LAST)
1761                         return 1;
1762
1763                 slot = (slot + 1) % NUM_RX_BUFF;
1764
1765                 /* I'm just being paranoid here :) */
1766                 if (unlikely(slot == dev->rx_slot))
1767                         return 0;
1768         }
1769 }
1770
1771 /* Hard IRQ */
1772 static void emac_rxde(void *param)
1773 {
1774         struct emac_instance *dev = param;
1775
1776         ++dev->estats.rx_stopped;
1777         emac_rx_disable_async(dev);
1778 }
1779
1780 /* Hard IRQ */
1781 static irqreturn_t emac_irq(int irq, void *dev_instance)
1782 {
1783         struct emac_instance *dev = dev_instance;
1784         struct emac_regs __iomem *p = dev->emacp;
1785         struct emac_error_stats *st = &dev->estats;
1786         u32 isr;
1787
1788         spin_lock(&dev->lock);
1789
1790         isr = in_be32(&p->isr);
1791         out_be32(&p->isr, isr);
1792
1793         DBG(dev, "isr = %08x" NL, isr);
1794
1795         if (isr & EMAC4_ISR_TXPE)
1796                 ++st->tx_parity;
1797         if (isr & EMAC4_ISR_RXPE)
1798                 ++st->rx_parity;
1799         if (isr & EMAC4_ISR_TXUE)
1800                 ++st->tx_underrun;
1801         if (isr & EMAC4_ISR_RXOE)
1802                 ++st->rx_fifo_overrun;
1803         if (isr & EMAC_ISR_OVR)
1804                 ++st->rx_overrun;
1805         if (isr & EMAC_ISR_BP)
1806                 ++st->rx_bad_packet;
1807         if (isr & EMAC_ISR_RP)
1808                 ++st->rx_runt_packet;
1809         if (isr & EMAC_ISR_SE)
1810                 ++st->rx_short_event;
1811         if (isr & EMAC_ISR_ALE)
1812                 ++st->rx_alignment_error;
1813         if (isr & EMAC_ISR_BFCS)
1814                 ++st->rx_bad_fcs;
1815         if (isr & EMAC_ISR_PTLE)
1816                 ++st->rx_packet_too_long;
1817         if (isr & EMAC_ISR_ORE)
1818                 ++st->rx_out_of_range;
1819         if (isr & EMAC_ISR_IRE)
1820                 ++st->rx_in_range;
1821         if (isr & EMAC_ISR_SQE)
1822                 ++st->tx_sqe;
1823         if (isr & EMAC_ISR_TE)
1824                 ++st->tx_errors;
1825
1826         spin_unlock(&dev->lock);
1827
1828         return IRQ_HANDLED;
1829 }
1830
1831 static struct net_device_stats *emac_stats(struct net_device *ndev)
1832 {
1833         struct emac_instance *dev = netdev_priv(ndev);
1834         struct emac_stats *st = &dev->stats;
1835         struct emac_error_stats *est = &dev->estats;
1836         struct net_device_stats *nst = &dev->nstats;
1837         unsigned long flags;
1838
1839         DBG2(dev, "stats" NL);
1840
1841         /* Compute "legacy" statistics */
1842         spin_lock_irqsave(&dev->lock, flags);
1843         nst->rx_packets = (unsigned long)st->rx_packets;
1844         nst->rx_bytes = (unsigned long)st->rx_bytes;
1845         nst->tx_packets = (unsigned long)st->tx_packets;
1846         nst->tx_bytes = (unsigned long)st->tx_bytes;
1847         nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
1848                                           est->rx_dropped_error +
1849                                           est->rx_dropped_resize +
1850                                           est->rx_dropped_mtu);
1851         nst->tx_dropped = (unsigned long)est->tx_dropped;
1852
1853         nst->rx_errors = (unsigned long)est->rx_bd_errors;
1854         nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
1855                                               est->rx_fifo_overrun +
1856                                               est->rx_overrun);
1857         nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
1858                                                est->rx_alignment_error);
1859         nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
1860                                              est->rx_bad_fcs);
1861         nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
1862                                                 est->rx_bd_short_event +
1863                                                 est->rx_bd_packet_too_long +
1864                                                 est->rx_bd_out_of_range +
1865                                                 est->rx_bd_in_range +
1866                                                 est->rx_runt_packet +
1867                                                 est->rx_short_event +
1868                                                 est->rx_packet_too_long +
1869                                                 est->rx_out_of_range +
1870                                                 est->rx_in_range);
1871
1872         nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
1873         nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
1874                                               est->tx_underrun);
1875         nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
1876         nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
1877                                           est->tx_bd_excessive_collisions +
1878                                           est->tx_bd_late_collision +
1879                                           est->tx_bd_multple_collisions);
1880         spin_unlock_irqrestore(&dev->lock, flags);
1881         return nst;
1882 }
1883
/* MAL callbacks for the regular (one BD per packet) RX path */
static struct mal_commac_ops emac_commac_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx,
	.rxde = &emac_rxde,
};
1890
/* MAL callbacks for the scatter/gather RX path: only peek_rx differs,
 * since a packet may span several BDs and is complete only at LAST. */
static struct mal_commac_ops emac_commac_sg_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx_sg,
	.rxde = &emac_rxde,
};
1897
1898 /* Ethtool support */
1899 static int emac_ethtool_get_settings(struct net_device *ndev,
1900                                      struct ethtool_cmd *cmd)
1901 {
1902         struct emac_instance *dev = netdev_priv(ndev);
1903
1904         cmd->supported = dev->phy.features;
1905         cmd->port = PORT_MII;
1906         cmd->phy_address = dev->phy.address;
1907         cmd->transceiver =
1908             dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;
1909
1910         mutex_lock(&dev->link_lock);
1911         cmd->advertising = dev->phy.advertising;
1912         cmd->autoneg = dev->phy.autoneg;
1913         cmd->speed = dev->phy.speed;
1914         cmd->duplex = dev->phy.duplex;
1915         mutex_unlock(&dev->link_lock);
1916
1917         return 0;
1918 }
1919
1920 static int emac_ethtool_set_settings(struct net_device *ndev,
1921                                      struct ethtool_cmd *cmd)
1922 {
1923         struct emac_instance *dev = netdev_priv(ndev);
1924         u32 f = dev->phy.features;
1925
1926         DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
1927             cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);
1928
1929         /* Basic sanity checks */
1930         if (dev->phy.address < 0)
1931                 return -EOPNOTSUPP;
1932         if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
1933                 return -EINVAL;
1934         if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
1935                 return -EINVAL;
1936         if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
1937                 return -EINVAL;
1938
1939         if (cmd->autoneg == AUTONEG_DISABLE) {
1940                 switch (cmd->speed) {
1941                 case SPEED_10:
1942                         if (cmd->duplex == DUPLEX_HALF
1943                             && !(f & SUPPORTED_10baseT_Half))
1944                                 return -EINVAL;
1945                         if (cmd->duplex == DUPLEX_FULL
1946                             && !(f & SUPPORTED_10baseT_Full))
1947                                 return -EINVAL;
1948                         break;
1949                 case SPEED_100:
1950                         if (cmd->duplex == DUPLEX_HALF
1951                             && !(f & SUPPORTED_100baseT_Half))
1952                                 return -EINVAL;
1953                         if (cmd->duplex == DUPLEX_FULL
1954                             && !(f & SUPPORTED_100baseT_Full))
1955                                 return -EINVAL;
1956                         break;
1957                 case SPEED_1000:
1958                         if (cmd->duplex == DUPLEX_HALF
1959                             && !(f & SUPPORTED_1000baseT_Half))
1960                                 return -EINVAL;
1961                         if (cmd->duplex == DUPLEX_FULL
1962                             && !(f & SUPPORTED_1000baseT_Full))
1963                                 return -EINVAL;
1964                         break;
1965                 default:
1966                         return -EINVAL;
1967                 }
1968
1969                 mutex_lock(&dev->link_lock);
1970                 dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
1971                                                 cmd->duplex);
1972                 mutex_unlock(&dev->link_lock);
1973
1974         } else {
1975                 if (!(f & SUPPORTED_Autoneg))
1976                         return -EINVAL;
1977
1978                 mutex_lock(&dev->link_lock);
1979                 dev->phy.def->ops->setup_aneg(&dev->phy,
1980                                               (cmd->advertising & f) |
1981                                               (dev->phy.advertising &
1982                                                (ADVERTISED_Pause |
1983                                                 ADVERTISED_Asym_Pause)));
1984                 mutex_unlock(&dev->link_lock);
1985         }
1986         emac_force_link_update(dev);
1987
1988         return 0;
1989 }
1990
1991 static void emac_ethtool_get_ringparam(struct net_device *ndev,
1992                                        struct ethtool_ringparam *rp)
1993 {
1994         rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
1995         rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
1996 }
1997
1998 static void emac_ethtool_get_pauseparam(struct net_device *ndev,
1999                                         struct ethtool_pauseparam *pp)
2000 {
2001         struct emac_instance *dev = netdev_priv(ndev);
2002
2003         mutex_lock(&dev->link_lock);
2004         if ((dev->phy.features & SUPPORTED_Autoneg) &&
2005             (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
2006                 pp->autoneg = 1;
2007
2008         if (dev->phy.duplex == DUPLEX_FULL) {
2009                 if (dev->phy.pause)
2010                         pp->rx_pause = pp->tx_pause = 1;
2011                 else if (dev->phy.asym_pause)
2012                         pp->tx_pause = 1;
2013         }
2014         mutex_unlock(&dev->link_lock);
2015 }
2016
2017 static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
2018 {
2019         struct emac_instance *dev = netdev_priv(ndev);
2020
2021         return dev->tah_dev != NULL;
2022 }
2023
2024 static int emac_get_regs_len(struct emac_instance *dev)
2025 {
2026         if (emac_has_feature(dev, EMAC_FTR_EMAC4))
2027                 return sizeof(struct emac_ethtool_regs_subhdr) +
2028                         EMAC4_ETHTOOL_REGS_SIZE;
2029         else
2030                 return sizeof(struct emac_ethtool_regs_subhdr) +
2031                         EMAC_ETHTOOL_REGS_SIZE;
2032 }
2033
2034 static int emac_ethtool_get_regs_len(struct net_device *ndev)
2035 {
2036         struct emac_instance *dev = netdev_priv(ndev);
2037         int size;
2038
2039         size = sizeof(struct emac_ethtool_regs_hdr) +
2040                 emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
2041         if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2042                 size += zmii_get_regs_len(dev->zmii_dev);
2043         if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2044                 size += rgmii_get_regs_len(dev->rgmii_dev);
2045         if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2046                 size += tah_get_regs_len(dev->tah_dev);
2047
2048         return size;
2049 }
2050
2051 static void *emac_dump_regs(struct emac_instance *dev, void *buf)
2052 {
2053         struct emac_ethtool_regs_subhdr *hdr = buf;
2054
2055         hdr->index = dev->cell_index;
2056         if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
2057                 hdr->version = EMAC4_ETHTOOL_REGS_VER;
2058                 memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE);
2059                 return ((void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE);
2060         } else {
2061                 hdr->version = EMAC_ETHTOOL_REGS_VER;
2062                 memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE);
2063                 return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE);
2064         }
2065 }
2066
2067 static void emac_ethtool_get_regs(struct net_device *ndev,
2068                                   struct ethtool_regs *regs, void *buf)
2069 {
2070         struct emac_instance *dev = netdev_priv(ndev);
2071         struct emac_ethtool_regs_hdr *hdr = buf;
2072
2073         hdr->components = 0;
2074         buf = hdr + 1;
2075
2076         buf = mal_dump_regs(dev->mal, buf);
2077         buf = emac_dump_regs(dev, buf);
2078         if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
2079                 hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
2080                 buf = zmii_dump_regs(dev->zmii_dev, buf);
2081         }
2082         if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
2083                 hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
2084                 buf = rgmii_dump_regs(dev->rgmii_dev, buf);
2085         }
2086         if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
2087                 hdr->components |= EMAC_ETHTOOL_REGS_TAH;
2088                 buf = tah_dump_regs(dev->tah_dev, buf);
2089         }
2090 }
2091
2092 static int emac_ethtool_nway_reset(struct net_device *ndev)
2093 {
2094         struct emac_instance *dev = netdev_priv(ndev);
2095         int res = 0;
2096
2097         DBG(dev, "nway_reset" NL);
2098
2099         if (dev->phy.address < 0)
2100                 return -EOPNOTSUPP;
2101
2102         mutex_lock(&dev->link_lock);
2103         if (!dev->phy.autoneg) {
2104                 res = -EINVAL;
2105                 goto out;
2106         }
2107
2108         dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
2109  out:
2110         mutex_unlock(&dev->link_lock);
2111         emac_force_link_update(dev);
2112         return res;
2113 }
2114
/* Number of u64 counters exported through get_ethtool_stats */
static int emac_ethtool_get_stats_count(struct net_device *ndev)
{
	return EMAC_ETHTOOL_STATS_COUNT;
}
2119
2120 static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
2121                                      u8 * buf)
2122 {
2123         if (stringset == ETH_SS_STATS)
2124                 memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
2125 }
2126
2127 static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
2128                                            struct ethtool_stats *estats,
2129                                            u64 * tmp_stats)
2130 {
2131         struct emac_instance *dev = netdev_priv(ndev);
2132
2133         memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
2134         tmp_stats += sizeof(dev->stats) / sizeof(u64);
2135         memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
2136 }
2137
2138 static void emac_ethtool_get_drvinfo(struct net_device *ndev,
2139                                      struct ethtool_drvinfo *info)
2140 {
2141         struct emac_instance *dev = netdev_priv(ndev);
2142
2143         strcpy(info->driver, "ibm_emac");
2144         strcpy(info->version, DRV_VERSION);
2145         info->fw_version[0] = '\0';
2146         sprintf(info->bus_info, "PPC 4xx EMAC-%d %s",
2147                 dev->cell_index, dev->ofdev->node->full_name);
2148         info->n_stats = emac_ethtool_get_stats_count(ndev);
2149         info->regdump_len = emac_ethtool_get_regs_len(ndev);
2150 }
2151
/* ethtool operations supported by this driver; generic helpers
 * (ethtool_op_*) are used where no device-specific handling is needed */
static const struct ethtool_ops emac_ethtool_ops = {
	.get_settings = emac_ethtool_get_settings,
	.set_settings = emac_ethtool_set_settings,
	.get_drvinfo = emac_ethtool_get_drvinfo,

	.get_regs_len = emac_ethtool_get_regs_len,
	.get_regs = emac_ethtool_get_regs,

	.nway_reset = emac_ethtool_nway_reset,

	.get_ringparam = emac_ethtool_get_ringparam,
	.get_pauseparam = emac_ethtool_get_pauseparam,

	.get_rx_csum = emac_ethtool_get_rx_csum,

	.get_strings = emac_ethtool_get_strings,
	.get_stats_count = emac_ethtool_get_stats_count,
	.get_ethtool_stats = emac_ethtool_get_ethtool_stats,

	.get_link = ethtool_op_get_link,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.get_sg = ethtool_op_get_sg,
};
2175
2176 static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
2177 {
2178         struct emac_instance *dev = netdev_priv(ndev);
2179         uint16_t *data = (uint16_t *) & rq->ifr_ifru;
2180
2181         DBG(dev, "ioctl %08x" NL, cmd);
2182
2183         if (dev->phy.address < 0)
2184                 return -EOPNOTSUPP;
2185
2186         switch (cmd) {
2187         case SIOCGMIIPHY:
2188         case SIOCDEVPRIVATE:
2189                 data[0] = dev->phy.address;
2190                 /* Fall through */
2191         case SIOCGMIIREG:
2192         case SIOCDEVPRIVATE + 1:
2193                 data[3] = emac_mdio_read(ndev, dev->phy.address, data[1]);
2194                 return 0;
2195
2196         case SIOCSMIIREG:
2197         case SIOCDEVPRIVATE + 2:
2198                 if (!capable(CAP_NET_ADMIN))
2199                         return -EPERM;
2200                 emac_mdio_write(ndev, dev->phy.address, data[1], data[2]);
2201                 return 0;
2202         default:
2203                 return -EOPNOTSUPP;
2204         }
2205 }
2206
/* State for one probe-order dependency of an EMAC instance; resolved in
 * stages (phandle -> node -> of_device -> drvdata) by emac_check_deps() */
struct emac_depentry {
	u32                     phandle;	/* DT phandle, 0 = no dependency */
	struct device_node      *node;		/* resolved device-tree node */
	struct of_device        *ofdev;		/* matching of_device, once found */
	void                    *drvdata;	/* non-NULL => dependency is bound */
};
2213
/* Slot indices into the dependency array used by emac_check_deps() /
 * emac_wait_deps() */
#define EMAC_DEP_MAL_IDX        0
#define EMAC_DEP_ZMII_IDX       1
#define EMAC_DEP_RGMII_IDX      2
#define EMAC_DEP_TAH_IDX        3
#define EMAC_DEP_MDIO_IDX       4
#define EMAC_DEP_PREV_IDX       5	/* previous EMAC in the boot list */
#define EMAC_DEP_COUNT          6
2221
2222 static int __devinit emac_check_deps(struct emac_instance *dev,
2223                                      struct emac_depentry *deps)
2224 {
2225         int i, there = 0;
2226         struct device_node *np;
2227
2228         for (i = 0; i < EMAC_DEP_COUNT; i++) {
2229                 /* no dependency on that item, allright */
2230                 if (deps[i].phandle == 0) {
2231                         there++;
2232                         continue;
2233                 }
2234                 /* special case for blist as the dependency might go away */
2235                 if (i == EMAC_DEP_PREV_IDX) {
2236                         np = *(dev->blist - 1);
2237                         if (np == NULL) {
2238                                 deps[i].phandle = 0;
2239                                 there++;
2240                                 continue;
2241                         }
2242                         if (deps[i].node == NULL)
2243                                 deps[i].node = of_node_get(np);
2244                 }
2245                 if (deps[i].node == NULL)
2246                         deps[i].node = of_find_node_by_phandle(deps[i].phandle);
2247                 if (deps[i].node == NULL)
2248                         continue;
2249                 if (deps[i].ofdev == NULL)
2250                         deps[i].ofdev = of_find_device_by_node(deps[i].node);
2251                 if (deps[i].ofdev == NULL)
2252                         continue;
2253                 if (deps[i].drvdata == NULL)
2254                         deps[i].drvdata = dev_get_drvdata(&deps[i].ofdev->dev);
2255                 if (deps[i].drvdata != NULL)
2256                         there++;
2257         }
2258         return (there == EMAC_DEP_COUNT);
2259 }
2260
2261 static void emac_put_deps(struct emac_instance *dev)
2262 {
2263         if (dev->mal_dev)
2264                 of_dev_put(dev->mal_dev);
2265         if (dev->zmii_dev)
2266                 of_dev_put(dev->zmii_dev);
2267         if (dev->rgmii_dev)
2268                 of_dev_put(dev->rgmii_dev);
2269         if (dev->mdio_dev)
2270                 of_dev_put(dev->mdio_dev);
2271         if (dev->tah_dev)
2272                 of_dev_put(dev->tah_dev);
2273 }
2274
2275 static int __devinit emac_of_bus_notify(struct notifier_block *nb,
2276                                         unsigned long action, void *data)
2277 {
2278         /* We are only intereted in device addition */
2279         if (action == BUS_NOTIFY_BOUND_DRIVER)
2280                 wake_up_all(&emac_probe_wait);
2281         return 0;
2282 }
2283
/* Wakes emac_wait_deps() whenever a driver binds on the OF bus */
static struct notifier_block emac_of_bus_notifier __devinitdata = {
	.notifier_call = emac_of_bus_notify
};
2287
/* Wait (up to EMAC_PROBE_DEP_TIMEOUT) for all probe-order dependencies
 * of @dev - the MAL, optional ZMII/RGMII/TAH/MDIO devices and the
 * previous EMAC in the boot list - to be bound by their drivers.
 *
 * Returns 0 on success with dev->*_dev holding of_device references
 * (which the caller must eventually drop via emac_put_deps()), or
 * -ENODEV on timeout with all transient references released.
 */
static int __devinit emac_wait_deps(struct emac_instance *dev)
{
	struct emac_depentry deps[EMAC_DEP_COUNT];
	int i, err;

	memset(&deps, 0, sizeof(deps));

	deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
	deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
	deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
	if (dev->tah_ph)
		deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
	if (dev->mdio_ph)
		deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
	/* The bogus phandle only marks the PREV slot as "in use"; the
	 * actual node is fetched from the boot list by emac_check_deps() */
	if (dev->blist && dev->blist > emac_boot_list)
		deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
	/* Get woken up on every driver bind and re-run the check */
	bus_register_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
	wait_event_timeout(emac_probe_wait,
			   emac_check_deps(dev, deps),
			   EMAC_PROBE_DEP_TIMEOUT);
	bus_unregister_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
	/* Final check: dependencies may have completed just after timeout */
	err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
	for (i = 0; i < EMAC_DEP_COUNT; i++) {
		if (deps[i].node)
			of_node_put(deps[i].node);
		if (err && deps[i].ofdev)
			of_dev_put(deps[i].ofdev);
	}
	if (err == 0) {
		/* Success: transfer the of_device references to @dev */
		dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
		dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
		dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
		dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
		dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
	}
	/* The previous-EMAC device was only needed for probe ordering */
	if (deps[EMAC_DEP_PREV_IDX].ofdev)
		of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
	return err;
}
2327
2328 static int __devinit emac_read_uint_prop(struct device_node *np, const char *name,
2329                                          u32 *val, int fatal)
2330 {
2331         int len;
2332         const u32 *prop = of_get_property(np, name, &len);
2333         if (prop == NULL || len < sizeof(u32)) {
2334                 if (fatal)
2335                         printk(KERN_ERR "%s: missing %s property\n",
2336                                np->full_name, name);
2337                 return -ENODEV;
2338         }
2339         *val = *prop;
2340         return 0;
2341 }
2342
/* Probe for and initialize the PHY attached to @dev.
 *
 * Handles three flavours of configuration:
 *  - PHY-less (neither phy-address nor phy-map given): fake a fixed
 *    100FD MII link and return early;
 *  - GPCS: the PHY address is derived from the EMAC cell index;
 *  - regular MDIO: scan the addresses allowed by phy_map for a
 *    responding PHY and probe it.
 *
 * Returns 0 on success, -ENXIO if no PHY could be found.
 */
static int __devinit emac_init_phy(struct emac_instance *dev)
{
	struct device_node *np = dev->ofdev->node;
	struct net_device *ndev = dev->ndev;
	u32 phy_map, adv;
	int i;

	dev->phy.dev = ndev;
	dev->phy.mode = dev->phy_mode;

	/* PHY-less configuration.
	 * XXX I probably should move these settings to the dev tree
	 */
	if (dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) {
		emac_reset(dev);

		/* PHY-less configuration.
		 * XXX I probably should move these settings to the dev tree
		 */
		dev->phy.address = -1;
		dev->phy.features = SUPPORTED_100baseT_Full | SUPPORTED_MII;
		dev->phy.pause = 1;

		return 0;
	}

	/* busy_phy_map is shared between all EMAC instances; serialize
	 * the scan so two EMACs never claim the same PHY address */
	mutex_lock(&emac_phy_map_lock);
	phy_map = dev->phy_map | busy_phy_map;

	DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);

	dev->phy.mdio_read = emac_mdio_read;
	dev->phy.mdio_write = emac_mdio_write;

	/* Enable internal clock source */
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
#endif
	/* PHY clock workaround */
	emac_rx_clk_tx(dev);

	/* Enable internal clock source on 440GX*/
	/* NOTE(review): this repeats the dcri_clrset() above around the
	 * emac_rx_clk_tx() call - looks redundant, but left untouched;
	 * confirm against the 440GX PHY clock errata before removing. */
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
#endif
	/* Configure EMAC with defaults so we can at least use MDIO
	 * This is needed mostly for 440GX
	 */
	if (emac_phy_gpcs(dev->phy.mode)) {
		/* XXX
		 * Make GPCS PHY address equal to EMAC index.
		 * We probably should take into account busy_phy_map
		 * and/or phy_map here.
		 *
		 * Note that the busy_phy_map is currently global
		 * while it should probably be per-ASIC...
		 */
		dev->phy.address = dev->cell_index;
	}

	emac_configure(dev);

	/* An explicit PHY address restricts the scan below to that single
	 * address (only zero bits of phy_map are probed) */
	if (dev->phy_address != 0xffffffff)
		phy_map = ~(1 << dev->phy_address);

	for (i = 0; i < 0x20; phy_map >>= 1, ++i)
		if (!(phy_map & 1)) {
			int r;
			busy_phy_map |= 1 << i;

			/* Quick check if there is a PHY at the address */
			r = emac_mdio_read(dev->ndev, i, MII_BMCR);
			if (r == 0xffff || r < 0)
				continue;
			if (!emac_mii_phy_probe(&dev->phy, i))
				break;
		}

	/* Enable external clock source */
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR, SDR0_MFR_ECS, 0);
#endif
	mutex_unlock(&emac_phy_map_lock);
	/* i == 0x20 means the scan loop above exhausted all addresses */
	if (i == 0x20) {
		printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name);
		return -ENXIO;
	}

	/* Init PHY */
	if (dev->phy.def->ops->init)
		dev->phy.def->ops->init(&dev->phy);

	/* Disable any PHY features not supported by the platform */
	dev->phy.def->features &= ~dev->phy_feat_exc;

	/* Setup initial link parameters */
	if (dev->phy.features & SUPPORTED_Autoneg) {
		adv = dev->phy.features;
		if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
			adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
		/* Restart autonegotiation */
		dev->phy.def->ops->setup_aneg(&dev->phy, adv);
	} else {
		u32 f = dev->phy.def->features;
		int speed = SPEED_10, fd = DUPLEX_HALF;

		/* Select highest supported speed/duplex */
		if (f & SUPPORTED_1000baseT_Full) {
			speed = SPEED_1000;
			fd = DUPLEX_FULL;
		} else if (f & SUPPORTED_1000baseT_Half)
			speed = SPEED_1000;
		else if (f & SUPPORTED_100baseT_Full) {
			speed = SPEED_100;
			fd = DUPLEX_FULL;
		} else if (f & SUPPORTED_100baseT_Half)
			speed = SPEED_100;
		else if (f & SUPPORTED_10baseT_Full)
			fd = DUPLEX_FULL;

		/* Force link parameters */
		dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
	}
	return 0;
}
2471
2472 static int __devinit emac_init_config(struct emac_instance *dev)
2473 {
2474         struct device_node *np = dev->ofdev->node;
2475         const void *p;
2476         unsigned int plen;
2477         const char *pm, *phy_modes[] = {
2478                 [PHY_MODE_NA] = "",
2479                 [PHY_MODE_MII] = "mii",
2480                 [PHY_MODE_RMII] = "rmii",
2481                 [PHY_MODE_SMII] = "smii",
2482                 [PHY_MODE_RGMII] = "rgmii",
2483                 [PHY_MODE_TBI] = "tbi",
2484                 [PHY_MODE_GMII] = "gmii",
2485                 [PHY_MODE_RTBI] = "rtbi",
2486                 [PHY_MODE_SGMII] = "sgmii",
2487         };
2488
2489         /* Read config from device-tree */
2490         if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
2491                 return -ENXIO;
2492         if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
2493                 return -ENXIO;
2494         if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
2495                 return -ENXIO;
2496         if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
2497                 return -ENXIO;
2498         if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
2499                 dev->max_mtu = 1500;
2500         if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
2501                 dev->rx_fifo_size = 2048;
2502         if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
2503                 dev->tx_fifo_size = 2048;
2504         if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0))
2505                 dev->rx_fifo_size_gige = dev->rx_fifo_size;
2506         if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0))
2507                 dev->tx_fifo_size_gige = dev->tx_fifo_size;
2508         if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
2509                 dev->phy_address = 0xffffffff;
2510         if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
2511                 dev->phy_map = 0xffffffff;
2512         if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
2513                 return -ENXIO;
2514         if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
2515                 dev->tah_ph = 0;
2516         if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
2517                 dev->tah_port = 0;
2518         if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
2519                 dev->mdio_ph = 0;
2520         if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
2521                 dev->zmii_ph = 0;;
2522         if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
2523                 dev->zmii_port = 0xffffffff;;
2524         if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
2525                 dev->rgmii_ph = 0;;
2526         if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
2527                 dev->rgmii_port = 0xffffffff;;
2528         if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
2529                 dev->fifo_entry_size = 16;
2530         if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
2531                 dev->mal_burst_size = 256;
2532
2533         /* PHY mode needs some decoding */
2534         dev->phy_mode = PHY_MODE_NA;
2535         pm = of_get_property(np, "phy-mode", &plen);
2536         if (pm != NULL) {
2537                 int i;
2538                 for (i = 0; i < ARRAY_SIZE(phy_modes); i++)
2539                         if (!strcasecmp(pm, phy_modes[i])) {
2540                                 dev->phy_mode = i;
2541                                 break;
2542                         }
2543         }
2544
2545         /* Backward compat with non-final DT */
2546         if (dev->phy_mode == PHY_MODE_NA && pm != NULL && plen == 4) {
2547                 u32 nmode = *(const u32 *)pm;
2548                 if (nmode > PHY_MODE_NA && nmode <= PHY_MODE_SGMII)
2549                         dev->phy_mode = nmode;
2550         }
2551
2552         /* Check EMAC version */
2553         if (of_device_is_compatible(np, "ibm,emac4")) {
2554                 dev->features |= EMAC_FTR_EMAC4;
2555                 if (of_device_is_compatible(np, "ibm,emac-440gx"))
2556                         dev->features |= EMAC_FTR_440GX_PHY_CLK_FIX;
2557         } else {
2558                 if (of_device_is_compatible(np, "ibm,emac-440ep") ||
2559                     of_device_is_compatible(np, "ibm,emac-440gr"))
2560                         dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
2561         }
2562
2563         /* Fixup some feature bits based on the device tree */
2564         if (of_get_property(np, "has-inverted-stacr-oc", NULL))
2565                 dev->features |= EMAC_FTR_STACR_OC_INVERT;
2566         if (of_get_property(np, "has-new-stacr-staopc", NULL))
2567                 dev->features |= EMAC_FTR_HAS_NEW_STACR;
2568
2569         /* CAB lacks the appropriate properties */
2570         if (of_device_is_compatible(np, "ibm,emac-axon"))
2571                 dev->features |= EMAC_FTR_HAS_NEW_STACR |
2572                         EMAC_FTR_STACR_OC_INVERT;
2573
2574         /* Enable TAH/ZMII/RGMII features as found */
2575         if (dev->tah_ph != 0) {
2576 #ifdef CONFIG_IBM_NEW_EMAC_TAH
2577                 dev->features |= EMAC_FTR_HAS_TAH;
2578 #else
2579                 printk(KERN_ERR "%s: TAH support not enabled !\n",
2580                        np->full_name);
2581                 return -ENXIO;
2582 #endif
2583         }
2584
2585         if (dev->zmii_ph != 0) {
2586 #ifdef CONFIG_IBM_NEW_EMAC_ZMII
2587                 dev->features |= EMAC_FTR_HAS_ZMII;
2588 #else
2589                 printk(KERN_ERR "%s: ZMII support not enabled !\n",
2590                        np->full_name);
2591                 return -ENXIO;
2592 #endif
2593         }
2594
2595         if (dev->rgmii_ph != 0) {
2596 #ifdef CONFIG_IBM_NEW_EMAC_RGMII
2597                 dev->features |= EMAC_FTR_HAS_RGMII;
2598 #else
2599                 printk(KERN_ERR "%s: RGMII support not enabled !\n",
2600                        np->full_name);
2601                 return -ENXIO;
2602 #endif
2603         }
2604
2605         /* Read MAC-address */
2606         p = of_get_property(np, "local-mac-address", NULL);
2607         if (p == NULL) {
2608                 printk(KERN_ERR "%s: Can't find local-mac-address property\n",
2609                        np->full_name);
2610                 return -ENXIO;
2611         }
2612         memcpy(dev->ndev->dev_addr, p, 6);
2613
2614         DBG(dev, "features     : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
2615         DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
2616         DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
2617         DBG(dev, "max_mtu      : %d\n", dev->max_mtu);
2618         DBG(dev, "OPB freq     : %d\n", dev->opb_bus_freq);
2619
2620         return 0;
2621 }
2622
2623 static int __devinit emac_probe(struct of_device *ofdev,
2624                                 const struct of_device_id *match)
2625 {
2626         struct net_device *ndev;
2627         struct emac_instance *dev;
2628         struct device_node *np = ofdev->node;
2629         struct device_node **blist = NULL;
2630         int err, i;
2631
2632         /* Skip unused/unwired EMACS.  We leave the check for an unused
2633          * property here for now, but new flat device trees should set a
2634          * status property to "disabled" instead.
2635          */
2636         if (of_get_property(np, "unused", NULL) || !of_device_is_available(np))
2637                 return -ENODEV;
2638
2639         /* Find ourselves in the bootlist if we are there */
2640         for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
2641                 if (emac_boot_list[i] == np)
2642                         blist = &emac_boot_list[i];
2643
2644         /* Allocate our net_device structure */
2645         err = -ENOMEM;
2646         ndev = alloc_etherdev(sizeof(struct emac_instance));
2647         if (!ndev) {
2648                 printk(KERN_ERR "%s: could not allocate ethernet device!\n",
2649                        np->full_name);
2650                 goto err_gone;
2651         }
2652         dev = netdev_priv(ndev);
2653         dev->ndev = ndev;
2654         dev->ofdev = ofdev;
2655         dev->blist = blist;
2656         SET_NETDEV_DEV(ndev, &ofdev->dev);
2657
2658         /* Initialize some embedded data structures */
2659         mutex_init(&dev->mdio_lock);
2660         mutex_init(&dev->link_lock);
2661         spin_lock_init(&dev->lock);
2662         INIT_WORK(&dev->reset_work, emac_reset_work);
2663
2664         /* Init various config data based on device-tree */
2665         err = emac_init_config(dev);
2666         if (err != 0)
2667                 goto err_free;
2668
2669         /* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
2670         dev->emac_irq = irq_of_parse_and_map(np, 0);
2671         dev->wol_irq = irq_of_parse_and_map(np, 1);
2672         if (dev->emac_irq == NO_IRQ) {
2673                 printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name);
2674                 goto err_free;
2675         }
2676         ndev->irq = dev->emac_irq;
2677
2678         /* Map EMAC regs */
2679         if (of_address_to_resource(np, 0, &dev->rsrc_regs)) {
2680                 printk(KERN_ERR "%s: Can't get registers address\n",
2681                        np->full_name);
2682                 goto err_irq_unmap;
2683         }
2684         // TODO : request_mem_region
2685         dev->emacp = ioremap(dev->rsrc_regs.start, sizeof(struct emac_regs));
2686         if (dev->emacp == NULL) {
2687                 printk(KERN_ERR "%s: Can't map device registers!\n",
2688                        np->full_name);
2689                 err = -ENOMEM;
2690                 goto err_irq_unmap;
2691         }
2692
2693         /* Wait for dependent devices */
2694         err = emac_wait_deps(dev);
2695         if (err) {
2696                 printk(KERN_ERR
2697                        "%s: Timeout waiting for dependent devices\n",
2698                        np->full_name);
2699                 /*  display more info about what's missing ? */
2700                 goto err_reg_unmap;
2701         }
2702         dev->mal = dev_get_drvdata(&dev->mal_dev->dev);
2703         if (dev->mdio_dev != NULL)
2704                 dev->mdio_instance = dev_get_drvdata(&dev->mdio_dev->dev);
2705
2706         /* Register with MAL */
2707         dev->commac.ops = &emac_commac_ops;
2708         dev->commac.dev = dev;
2709         dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
2710         dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
2711         err = mal_register_commac(dev->mal, &dev->commac);
2712         if (err) {
2713                 printk(KERN_ERR "%s: failed to register with mal %s!\n",
2714                        np->full_name, dev->mal_dev->node->full_name);
2715                 goto err_rel_deps;
2716         }
2717         dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
2718         dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
2719
2720         /* Get pointers to BD rings */
2721         dev->tx_desc =
2722             dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
2723         dev->rx_desc =
2724             dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);
2725
2726         DBG(dev, "tx_desc %p" NL, dev->tx_desc);
2727         DBG(dev, "rx_desc %p" NL, dev->rx_desc);
2728
2729         /* Clean rings */
2730         memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
2731         memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
2732         memset(dev->tx_skb, 0, NUM_TX_BUFF * sizeof(struct sk_buff *));
2733         memset(dev->rx_skb, 0, NUM_RX_BUFF * sizeof(struct sk_buff *));
2734
2735         /* Attach to ZMII, if needed */
2736         if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
2737             (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
2738                 goto err_unreg_commac;
2739
2740         /* Attach to RGMII, if needed */
2741         if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
2742             (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0)
2743                 goto err_detach_zmii;
2744
2745         /* Attach to TAH, if needed */
2746         if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
2747             (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
2748                 goto err_detach_rgmii;
2749
2750         /* Set some link defaults before we can find out real parameters */
2751         dev->phy.speed = SPEED_100;
2752         dev->phy.duplex = DUPLEX_FULL;
2753         dev->phy.autoneg = AUTONEG_DISABLE;
2754         dev->phy.pause = dev->phy.asym_pause = 0;
2755         dev->stop_timeout = STOP_TIMEOUT_100;
2756         INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);
2757
2758         /* Find PHY if any */
2759         err = emac_init_phy(dev);
2760         if (err != 0)
2761                 goto err_detach_tah;
2762
2763         /* Fill in the driver function table */
2764         ndev->open = &emac_open;
2765         if (dev->tah_dev)
2766                 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
2767         ndev->tx_timeout = &emac_tx_timeout;
2768         ndev->watchdog_timeo = 5 * HZ;
2769         ndev->stop = &emac_close;
2770         ndev->get_stats = &emac_stats;
2771         ndev->set_multicast_list = &emac_set_multicast_list;
2772         ndev->do_ioctl = &emac_ioctl;
2773         if (emac_phy_supports_gige(dev->phy_mode)) {
2774                 ndev->hard_start_xmit = &emac_start_xmit_sg;
2775                 ndev->change_mtu = &emac_change_mtu;
2776                 dev->commac.ops = &emac_commac_sg_ops;
2777         } else {
2778                 ndev->hard_start_xmit = &emac_start_xmit;
2779         }
2780         SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
2781
2782         netif_carrier_off(ndev);
2783         netif_stop_queue(ndev);
2784
2785         err = register_netdev(ndev);
2786         if (err) {
2787                 printk(KERN_ERR "%s: failed to register net device (%d)!\n",
2788                        np->full_name, err);
2789                 goto err_detach_tah;
2790         }
2791
2792         /* Set our drvdata last as we don't want them visible until we are
2793          * fully initialized
2794          */
2795         wmb();
2796         dev_set_drvdata(&ofdev->dev, dev);
2797
2798         /* There's a new kid in town ! Let's tell everybody */
2799         wake_up_all(&emac_probe_wait);
2800
2801
2802         printk(KERN_INFO
2803                "%s: EMAC-%d %s, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
2804                ndev->name, dev->cell_index, np->full_name,
2805                ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
2806                ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);
2807
2808         if (dev->phy.address >= 0)
2809                 printk("%s: found %s PHY (0x%02x)\n", ndev->name,
2810                        dev->phy.def->name, dev->phy.address);
2811
2812         emac_dbg_register(dev);
2813
2814         /* Life is good */
2815         return 0;
2816
2817         /* I have a bad feeling about this ... */
2818
2819  err_detach_tah:
2820         if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2821                 tah_detach(dev->tah_dev, dev->tah_port);
2822  err_detach_rgmii:
2823         if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2824                 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2825  err_detach_zmii:
2826         if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2827                 zmii_detach(dev->zmii_dev, dev->zmii_port);
2828  err_unreg_commac:
2829         mal_unregister_commac(dev->mal, &dev->commac);
2830  err_rel_deps:
2831         emac_put_deps(dev);
2832  err_reg_unmap:
2833         iounmap(dev->emacp);
2834  err_irq_unmap:
2835         if (dev->wol_irq != NO_IRQ)
2836                 irq_dispose_mapping(dev->wol_irq);
2837         if (dev->emac_irq != NO_IRQ)
2838                 irq_dispose_mapping(dev->emac_irq);
2839  err_free:
2840         kfree(ndev);
2841  err_gone:
2842         /* if we were on the bootlist, remove us as we won't show up and
2843          * wake up all waiters to notify them in case they were waiting
2844          * on us
2845          */
2846         if (blist) {
2847                 *blist = NULL;
2848                 wake_up_all(&emac_probe_wait);
2849         }
2850         return err;
2851 }
2852
2853 static int __devexit emac_remove(struct of_device *ofdev)
2854 {
2855         struct emac_instance *dev = dev_get_drvdata(&ofdev->dev);
2856
2857         DBG(dev, "remove" NL);
2858
2859         dev_set_drvdata(&ofdev->dev, NULL);
2860
2861         unregister_netdev(dev->ndev);
2862
2863         flush_scheduled_work();
2864
2865         if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2866                 tah_detach(dev->tah_dev, dev->tah_port);
2867         if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2868                 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2869         if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2870                 zmii_detach(dev->zmii_dev, dev->zmii_port);
2871
2872         mal_unregister_commac(dev->mal, &dev->commac);
2873         emac_put_deps(dev);
2874
2875         emac_dbg_unregister(dev);
2876         iounmap(dev->emacp);
2877
2878         if (dev->wol_irq != NO_IRQ)
2879                 irq_dispose_mapping(dev->wol_irq);
2880         if (dev->emac_irq != NO_IRQ)
2881                 irq_dispose_mapping(dev->emac_irq);
2882
2883         kfree(dev->ndev);
2884
2885         return 0;
2886 }
2887
/* XXX Features in here should be replaced by properties... */
/* Device-tree match table: this driver binds to "network"-type nodes
 * compatible with either the classic "ibm,emac" or the "ibm,emac4"
 * variant (feature differences are decoded in emac_init_config()).
 */
static struct of_device_id emac_match[] =
{
	{
		.type		= "network",
		.compatible	= "ibm,emac",
	},
	{
		.type		= "network",
		.compatible	= "ibm,emac4",
	},
	{},	/* sentinel */
};
2901
2902 static struct of_platform_driver emac_driver = {
2903         .name = "emac",
2904         .match_table = emac_match,
2905
2906         .probe = emac_probe,
2907         .remove = emac_remove,
2908 };
2909
2910 static void __init emac_make_bootlist(void)
2911 {
2912         struct device_node *np = NULL;
2913         int j, max, i = 0, k;
2914         int cell_indices[EMAC_BOOT_LIST_SIZE];
2915
2916         /* Collect EMACs */
2917         while((np = of_find_all_nodes(np)) != NULL) {
2918                 const u32 *idx;
2919
2920                 if (of_match_node(emac_match, np) == NULL)
2921                         continue;
2922                 if (of_get_property(np, "unused", NULL))
2923                         continue;
2924                 idx = of_get_property(np, "cell-index", NULL);
2925                 if (idx == NULL)
2926                         continue;
2927                 cell_indices[i] = *idx;
2928                 emac_boot_list[i++] = of_node_get(np);
2929                 if (i >= EMAC_BOOT_LIST_SIZE) {
2930                         of_node_put(np);
2931                         break;
2932                 }
2933         }
2934         max = i;
2935
2936         /* Bubble sort them (doh, what a creative algorithm :-) */
2937         for (i = 0; max > 1 && (i < (max - 1)); i++)
2938                 for (j = i; j < max; j++) {
2939                         if (cell_indices[i] > cell_indices[j]) {
2940                                 np = emac_boot_list[i];
2941                                 emac_boot_list[i] = emac_boot_list[j];
2942                                 emac_boot_list[j] = np;
2943                                 k = cell_indices[i];
2944                                 cell_indices[i] = cell_indices[j];
2945                                 cell_indices[j] = k;
2946                         }
2947                 }
2948 }
2949
2950 static int __init emac_init(void)
2951 {
2952         int rc;
2953
2954         printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");
2955
2956         /* Init debug stuff */
2957         emac_init_debug();
2958
2959         /* Build EMAC boot list */
2960         emac_make_bootlist();
2961
2962         /* Init submodules */
2963         rc = mal_init();
2964         if (rc)
2965                 goto err;
2966         rc = zmii_init();
2967         if (rc)
2968                 goto err_mal;
2969         rc = rgmii_init();
2970         if (rc)
2971                 goto err_zmii;
2972         rc = tah_init();
2973         if (rc)
2974                 goto err_rgmii;
2975         rc = of_register_platform_driver(&emac_driver);
2976         if (rc)
2977                 goto err_tah;
2978
2979         return 0;
2980
2981  err_tah:
2982         tah_exit();
2983  err_rgmii:
2984         rgmii_exit();
2985  err_zmii:
2986         zmii_exit();
2987  err_mal:
2988         mal_exit();
2989  err:
2990         return rc;
2991 }
2992
2993 static void __exit emac_exit(void)
2994 {
2995         int i;
2996
2997         of_unregister_platform_driver(&emac_driver);
2998
2999         tah_exit();
3000         rgmii_exit();
3001         zmii_exit();
3002         mal_exit();
3003         emac_fini_debug();
3004
3005         /* Destroy EMAC boot list */
3006         for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
3007                 if (emac_boot_list[i])
3008                         of_node_put(emac_boot_list[i]);
3009 }
3010
/* Module load/unload hooks */
module_init(emac_init);
module_exit(emac_exit);