net: use netdev_mc_count and netdev_mc_empty when appropriate
[linux-2.6.git] / drivers / net / niu.c
1 /* niu.c: Neptune ethernet driver.
2  *
3  * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
4  */
5
6 #include <linux/module.h>
7 #include <linux/init.h>
8 #include <linux/pci.h>
9 #include <linux/dma-mapping.h>
10 #include <linux/netdevice.h>
11 #include <linux/ethtool.h>
12 #include <linux/etherdevice.h>
13 #include <linux/platform_device.h>
14 #include <linux/delay.h>
15 #include <linux/bitops.h>
16 #include <linux/mii.h>
17 #include <linux/if_ether.h>
18 #include <linux/if_vlan.h>
19 #include <linux/ip.h>
20 #include <linux/in.h>
21 #include <linux/ipv6.h>
22 #include <linux/log2.h>
23 #include <linux/jiffies.h>
24 #include <linux/crc32.h>
25 #include <linux/list.h>
26
27 #include <linux/io.h>
28
29 #ifdef CONFIG_SPARC64
30 #include <linux/of_device.h>
31 #endif
32
33 #include "niu.h"
34
35 #define DRV_MODULE_NAME         "niu"
36 #define PFX DRV_MODULE_NAME     ": "
37 #define DRV_MODULE_VERSION      "1.0"
38 #define DRV_MODULE_RELDATE      "Nov 14, 2008"
39
40 static char version[] __devinitdata =
41         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
42
43 MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
44 MODULE_DESCRIPTION("NIU ethernet driver");
45 MODULE_LICENSE("GPL");
46 MODULE_VERSION(DRV_MODULE_VERSION);
47
48 #ifndef readq
49 static u64 readq(void __iomem *reg)
50 {
51         return ((u64) readl(reg)) | (((u64) readl(reg + 4UL)) << 32);
52 }
53
54 static void writeq(u64 val, void __iomem *reg)
55 {
56         writel(val & 0xffffffff, reg);
57         writel(val >> 32, reg + 0x4UL);
58 }
59 #endif
60
61 static DEFINE_PCI_DEVICE_TABLE(niu_pci_tbl) = {
62         {PCI_DEVICE(PCI_VENDOR_ID_SUN, 0xabcd)},
63         {}
64 };
65
66 MODULE_DEVICE_TABLE(pci, niu_pci_tbl);
67
68 #define NIU_TX_TIMEOUT                  (5 * HZ)
69
70 #define nr64(reg)               readq(np->regs + (reg))
71 #define nw64(reg, val)          writeq((val), np->regs + (reg))
72
73 #define nr64_mac(reg)           readq(np->mac_regs + (reg))
74 #define nw64_mac(reg, val)      writeq((val), np->mac_regs + (reg))
75
76 #define nr64_ipp(reg)           readq(np->regs + np->ipp_off + (reg))
77 #define nw64_ipp(reg, val)      writeq((val), np->regs + np->ipp_off + (reg))
78
79 #define nr64_pcs(reg)           readq(np->regs + np->pcs_off + (reg))
80 #define nw64_pcs(reg, val)      writeq((val), np->regs + np->pcs_off + (reg))
81
82 #define nr64_xpcs(reg)          readq(np->regs + np->xpcs_off + (reg))
83 #define nw64_xpcs(reg, val)     writeq((val), np->regs + np->xpcs_off + (reg))
84
85 #define NIU_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
86
87 static int niu_debug;
88 static int debug = -1;
89 module_param(debug, int, 0);
90 MODULE_PARM_DESC(debug, "NIU debug level");
91
92 #define niudbg(TYPE, f, a...) \
93 do {    if ((np)->msg_enable & NETIF_MSG_##TYPE) \
94                 printk(KERN_DEBUG PFX f, ## a); \
95 } while (0)
96
97 #define niuinfo(TYPE, f, a...) \
98 do {    if ((np)->msg_enable & NETIF_MSG_##TYPE) \
99                 printk(KERN_INFO PFX f, ## a); \
100 } while (0)
101
102 #define niuwarn(TYPE, f, a...) \
103 do {    if ((np)->msg_enable & NETIF_MSG_##TYPE) \
104                 printk(KERN_WARNING PFX f, ## a); \
105 } while (0)
106
107 #define niu_lock_parent(np, flags) \
108         spin_lock_irqsave(&np->parent->lock, flags)
109 #define niu_unlock_parent(np, flags) \
110         spin_unlock_irqrestore(&np->parent->lock, flags)
111
112 static int serdes_init_10g_serdes(struct niu *np);
113
114 static int __niu_wait_bits_clear_mac(struct niu *np, unsigned long reg,
115                                      u64 bits, int limit, int delay)
116 {
117         while (--limit >= 0) {
118                 u64 val = nr64_mac(reg);
119
120                 if (!(val & bits))
121                         break;
122                 udelay(delay);
123         }
124         if (limit < 0)
125                 return -ENODEV;
126         return 0;
127 }
128
/* Write @bits to MAC register @reg and wait for the hardware to
 * clear them again (self-clearing command bits).
 *
 * Note the register is written with @bits alone, not read-modify-
 * write.  Polls up to @limit times with @delay microseconds between
 * reads.  On timeout, logs the stuck bits using @reg_name and the
 * final register value, and returns -ENODEV; returns 0 on success.
 */
static int __niu_set_and_wait_clear_mac(struct niu *np, unsigned long reg,
					u64 bits, int limit, int delay,
					const char *reg_name)
{
	int err;

	nw64_mac(reg, bits);
	err = __niu_wait_bits_clear_mac(np, reg, bits, limit, delay);
	if (err)
		dev_err(np->device, PFX "%s: bits (%llx) of register %s "
			"would not clear, val[%llx]\n",
			np->dev->name, (unsigned long long) bits, reg_name,
			(unsigned long long) nr64_mac(reg));
	return err;
}
144
145 #define niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
146 ({      BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
147         __niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
148 })
149
150 static int __niu_wait_bits_clear_ipp(struct niu *np, unsigned long reg,
151                                      u64 bits, int limit, int delay)
152 {
153         while (--limit >= 0) {
154                 u64 val = nr64_ipp(reg);
155
156                 if (!(val & bits))
157                         break;
158                 udelay(delay);
159         }
160         if (limit < 0)
161                 return -ENODEV;
162         return 0;
163 }
164
/* Set @bits in IPP register @reg and wait for them to self-clear.
 *
 * Unlike the MAC variant, this does a read-modify-write so the other
 * bits in the register are preserved.  Polls up to @limit times with
 * @delay microseconds between reads.  On timeout, logs the stuck bits
 * using @reg_name and the final register value, and returns -ENODEV;
 * returns 0 on success.
 */
static int __niu_set_and_wait_clear_ipp(struct niu *np, unsigned long reg,
					u64 bits, int limit, int delay,
					const char *reg_name)
{
	int err;
	u64 val;

	val = nr64_ipp(reg);
	val |= bits;
	nw64_ipp(reg, val);

	err = __niu_wait_bits_clear_ipp(np, reg, bits, limit, delay);
	if (err)
		dev_err(np->device, PFX "%s: bits (%llx) of register %s "
			"would not clear, val[%llx]\n",
			np->dev->name, (unsigned long long) bits, reg_name,
			(unsigned long long) nr64_ipp(reg));
	return err;
}
184
185 #define niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
186 ({      BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
187         __niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
188 })
189
190 static int __niu_wait_bits_clear(struct niu *np, unsigned long reg,
191                                  u64 bits, int limit, int delay)
192 {
193         while (--limit >= 0) {
194                 u64 val = nr64(reg);
195
196                 if (!(val & bits))
197                         break;
198                 udelay(delay);
199         }
200         if (limit < 0)
201                 return -ENODEV;
202         return 0;
203 }
204
205 #define niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY) \
206 ({      BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
207         __niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY); \
208 })
209
/* Write @bits to global register @reg and wait for the hardware to
 * clear them again (self-clearing command bits).
 *
 * Note the register is written with @bits alone, not read-modify-
 * write.  Polls up to @limit times with @delay microseconds between
 * reads.  On timeout, logs the stuck bits using @reg_name and the
 * final register value, and returns -ENODEV; returns 0 on success.
 */
static int __niu_set_and_wait_clear(struct niu *np, unsigned long reg,
				    u64 bits, int limit, int delay,
				    const char *reg_name)
{
	int err;

	nw64(reg, bits);
	err = __niu_wait_bits_clear(np, reg, bits, limit, delay);
	if (err)
		dev_err(np->device, PFX "%s: bits (%llx) of register %s "
			"would not clear, val[%llx]\n",
			np->dev->name, (unsigned long long) bits, reg_name,
			(unsigned long long) nr64(reg));
	return err;
}
225
226 #define niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
227 ({      BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
228         __niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
229 })
230
231 static void niu_ldg_rearm(struct niu *np, struct niu_ldg *lp, int on)
232 {
233         u64 val = (u64) lp->timer;
234
235         if (on)
236                 val |= LDG_IMGMT_ARM;
237
238         nw64(LDG_IMGMT(lp->ldg_num), val);
239 }
240
241 static int niu_ldn_irq_enable(struct niu *np, int ldn, int on)
242 {
243         unsigned long mask_reg, bits;
244         u64 val;
245
246         if (ldn < 0 || ldn > LDN_MAX)
247                 return -EINVAL;
248
249         if (ldn < 64) {
250                 mask_reg = LD_IM0(ldn);
251                 bits = LD_IM0_MASK;
252         } else {
253                 mask_reg = LD_IM1(ldn - 64);
254                 bits = LD_IM1_MASK;
255         }
256
257         val = nr64(mask_reg);
258         if (on)
259                 val &= ~bits;
260         else
261                 val |= bits;
262         nw64(mask_reg, val);
263
264         return 0;
265 }
266
267 static int niu_enable_ldn_in_ldg(struct niu *np, struct niu_ldg *lp, int on)
268 {
269         struct niu_parent *parent = np->parent;
270         int i;
271
272         for (i = 0; i <= LDN_MAX; i++) {
273                 int err;
274
275                 if (parent->ldg_map[i] != lp->ldg_num)
276                         continue;
277
278                 err = niu_ldn_irq_enable(np, i, on);
279                 if (err)
280                         return err;
281         }
282         return 0;
283 }
284
285 static int niu_enable_interrupts(struct niu *np, int on)
286 {
287         int i;
288
289         for (i = 0; i < np->num_ldg; i++) {
290                 struct niu_ldg *lp = &np->ldg[i];
291                 int err;
292
293                 err = niu_enable_ldn_in_ldg(np, lp, on);
294                 if (err)
295                         return err;
296         }
297         for (i = 0; i < np->num_ldg; i++)
298                 niu_ldg_rearm(np, &np->ldg[i], on);
299
300         return 0;
301 }
302
303 static u32 phy_encode(u32 type, int port)
304 {
305         return (type << (port * 2));
306 }
307
308 static u32 phy_decode(u32 val, int port)
309 {
310         return (val >> (port * 2)) & PORT_TYPE_MASK;
311 }
312
/* Wait for the current MIF frame to complete.
 *
 * Polls MIF_FRAME_OUTPUT up to 1000 times, 10us apart, until the
 * turn-around (TA) bit is set.  Returns the 16-bit data field of the
 * completed frame on success, or -ENODEV if the frame never
 * completed.
 */
static int mdio_wait(struct niu *np)
{
	int limit = 1000;
	u64 val;

	while (--limit > 0) {
		val = nr64(MIF_FRAME_OUTPUT);
		/* TA bit set means the PHY has driven the bus and the
		 * frame is done; the low bits now hold the read data. */
		if ((val >> MIF_FRAME_OUTPUT_TA_SHIFT) & 0x1)
			return val & MIF_FRAME_OUTPUT_DATA;

		udelay(10);
	}

	return -ENODEV;
}
328
329 static int mdio_read(struct niu *np, int port, int dev, int reg)
330 {
331         int err;
332
333         nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
334         err = mdio_wait(np);
335         if (err < 0)
336                 return err;
337
338         nw64(MIF_FRAME_OUTPUT, MDIO_READ_OP(port, dev));
339         return mdio_wait(np);
340 }
341
342 static int mdio_write(struct niu *np, int port, int dev, int reg, int data)
343 {
344         int err;
345
346         nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
347         err = mdio_wait(np);
348         if (err < 0)
349                 return err;
350
351         nw64(MIF_FRAME_OUTPUT, MDIO_WRITE_OP(port, dev, data));
352         err = mdio_wait(np);
353         if (err < 0)
354                 return err;
355
356         return 0;
357 }
358
/* Issue a Clause 22 MII read of register @reg on PHY @port.
 * Returns the 16-bit register value, or -ENODEV if the frame did
 * not complete.
 */
static int mii_read(struct niu *np, int port, int reg)
{
	nw64(MIF_FRAME_OUTPUT, MII_READ_OP(port, reg));
	return mdio_wait(np);
}
364
365 static int mii_write(struct niu *np, int port, int reg, int data)
366 {
367         int err;
368
369         nw64(MIF_FRAME_OUTPUT, MII_WRITE_OP(port, reg, data));
370         err = mdio_wait(np);
371         if (err < 0)
372                 return err;
373
374         return 0;
375 }
376
377 static int esr2_set_tx_cfg(struct niu *np, unsigned long channel, u32 val)
378 {
379         int err;
380
381         err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
382                          ESR2_TI_PLL_TX_CFG_L(channel),
383                          val & 0xffff);
384         if (!err)
385                 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
386                                  ESR2_TI_PLL_TX_CFG_H(channel),
387                                  val >> 16);
388         return err;
389 }
390
391 static int esr2_set_rx_cfg(struct niu *np, unsigned long channel, u32 val)
392 {
393         int err;
394
395         err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
396                          ESR2_TI_PLL_RX_CFG_L(channel),
397                          val & 0xffff);
398         if (!err)
399                 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
400                                  ESR2_TI_PLL_RX_CFG_H(channel),
401                                  val >> 16);
402         return err;
403 }
404
/* Bring up the NIU SERDES for 10G fiber operation (the only mode
 * this function handles).
 *
 * Builds the per-lane TX and RX PLL configuration words, optionally
 * enables the internal loopback test path when the link config asks
 * for LOOPBACK_PHY, and writes the configs to all four lanes.
 * Returns 0 on success or the first lane-programming error.
 */
static int serdes_init_niu_10g_fiber(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u32 tx_cfg, rx_cfg;
	unsigned long i;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_EQ_LP_ADAPTIVE);

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		/* NOTE(review): this write's return value is ignored;
		 * presumably loopback setup is best-effort here. */
		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;
	}

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		int err = esr2_set_tx_cfg(np, i, tx_cfg);
		if (err)
			return err;
	}

	for (i = 0; i < 4; i++) {
		int err = esr2_set_rx_cfg(np, i, rx_cfg);
		if (err)
			return err;
	}

	return 0;
}
442
/* Bring up the NIU SERDES for 1G operation.
 *
 * Sequence: build half-rate TX/RX lane configs (with optional PHY
 * loopback test bits), program the PLL for 8x multiply, wait 200us
 * for the PLL, configure all four lanes, then poll ESR_INT_SIGNALS
 * for the port's ready/detect bits.  Returns 0 on success, -EINVAL
 * for a port other than 0/1, a negative mdio error on programming
 * failure, or -ENODEV if the signal bits never came up.
 */
static int serdes_init_niu_1g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u16 pll_cfg, pll_sts;
	int max_retry = 100;
	u64 uninitialized_var(sig), mask, val;
	u32 tx_cfg, rx_cfg;
	unsigned long i;
	int err;

	/* Half-rate lane configuration for 1G. */
	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV |
		  PLL_TX_CFG_RATE_HALF);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_RATE_HALF);

	/* Only port 0 gets the adaptive low-pass equalizer. */
	if (np->port == 0)
		rx_cfg |= PLL_RX_CFG_EQ_LP_ADAPTIVE;

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		/* NOTE(review): return value ignored, as in the other
		 * serdes init paths. */
		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;
	}

	/* Initialize PLL for 1G */
	pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_8X);

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_CFG_L, pll_cfg);
	if (err) {
		dev_err(np->device, PFX "NIU Port %d "
			"serdes_init_niu_1g_serdes: "
			"mdio write to ESR2_TI_PLL_CFG_L failed", np->port);
		return err;
	}

	pll_sts = PLL_CFG_ENPLL;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_STS_L, pll_sts);
	if (err) {
		dev_err(np->device, PFX "NIU Port %d "
			"serdes_init_niu_1g_serdes: "
			"mdio write to ESR2_TI_PLL_STS_L failed", np->port);
		return err;
	}

	/* Give the PLL time to settle before touching the lanes. */
	udelay(200);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		err = esr2_set_tx_cfg(np, i, tx_cfg);
		if (err)
			return err;
	}

	for (i = 0; i < 4; i++) {
		err = esr2_set_rx_cfg(np, i, rx_cfg);
		if (err)
			return err;
	}

	/* Select the ready/detect bits for this port. */
	switch (np->port) {
	case 0:
		val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
		mask = val;
		break;

	case 1:
		val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
		mask = val;
		break;

	default:
		return -EINVAL;
	}

	/* Poll (up to 100 * 500ms) for the SERDES to signal ready. */
	while (max_retry--) {
		sig = nr64(ESR_INT_SIGNALS);
		if ((sig & mask) == val)
			break;

		mdelay(500);
	}

	if ((sig & mask) != val) {
		dev_err(np->device, PFX "Port %u signal bits [%08x] are not "
			"[%08x]\n", np->port, (int) (sig & mask), (int) val);
		return -ENODEV;
	}

	return 0;
}
541
/* Bring up the NIU SERDES for 10G operation, falling back to 1G.
 *
 * Sequence: build full-rate TX/RX lane configs (with optional PHY
 * loopback test bits), program the PLL for 10x multiply, wait 200us,
 * configure all four lanes, then poll ESR_INT_SIGNALS for the full
 * set of per-port ready/detect bits.  If the 10G signal bits never
 * come up, retries the link at 1G via serdes_init_niu_1g_serdes()
 * and on success downgrades np->flags/mac_xcvr accordingly.
 * Returns 0 on success, -EINVAL for a port other than 0/1, a
 * negative mdio error, or -ENODEV if neither speed linked.
 */
static int serdes_init_niu_10g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u32 tx_cfg, rx_cfg, pll_cfg, pll_sts;
	int max_retry = 100;
	u64 uninitialized_var(sig), mask, val;
	unsigned long i;
	int err;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_EQ_LP_ADAPTIVE);

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		/* NOTE(review): return value ignored, as in the other
		 * serdes init paths. */
		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;
	}

	/* Initialize PLL for 10G */
	pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_10X);

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_CFG_L, pll_cfg & 0xffff);
	if (err) {
		dev_err(np->device, PFX "NIU Port %d "
			"serdes_init_niu_10g_serdes: "
			"mdio write to ESR2_TI_PLL_CFG_L failed", np->port);
		return err;
	}

	pll_sts = PLL_CFG_ENPLL;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_STS_L, pll_sts & 0xffff);
	if (err) {
		dev_err(np->device, PFX "NIU Port %d "
			"serdes_init_niu_10g_serdes: "
			"mdio write to ESR2_TI_PLL_STS_L failed", np->port);
		return err;
	}

	/* Give the PLL time to settle before touching the lanes. */
	udelay(200);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		err = esr2_set_tx_cfg(np, i, tx_cfg);
		if (err)
			return err;
	}

	for (i = 0; i < 4; i++) {
		err = esr2_set_rx_cfg(np, i, rx_cfg);
		if (err)
			return err;
	}

	/* check if serdes is ready */

	switch (np->port) {
	case 0:
		mask = ESR_INT_SIGNALS_P0_BITS;
		val = (ESR_INT_SRDY0_P0 |
		       ESR_INT_DET0_P0 |
		       ESR_INT_XSRDY_P0 |
		       ESR_INT_XDP_P0_CH3 |
		       ESR_INT_XDP_P0_CH2 |
		       ESR_INT_XDP_P0_CH1 |
		       ESR_INT_XDP_P0_CH0);
		break;

	case 1:
		mask = ESR_INT_SIGNALS_P1_BITS;
		val = (ESR_INT_SRDY0_P1 |
		       ESR_INT_DET0_P1 |
		       ESR_INT_XSRDY_P1 |
		       ESR_INT_XDP_P1_CH3 |
		       ESR_INT_XDP_P1_CH2 |
		       ESR_INT_XDP_P1_CH1 |
		       ESR_INT_XDP_P1_CH0);
		break;

	default:
		return -EINVAL;
	}

	/* Poll (up to 100 * 500ms) for the SERDES to signal ready. */
	while (max_retry--) {
		sig = nr64(ESR_INT_SIGNALS);
		if ((sig & mask) == val)
			break;

		mdelay(500);
	}

	if ((sig & mask) != val) {
		pr_info(PFX "NIU Port %u signal bits [%08x] are not "
			"[%08x] for 10G...trying 1G\n",
			np->port, (int) (sig & mask), (int) val);

		/* 10G failed, try initializing at 1G */
		err = serdes_init_niu_1g_serdes(np);
		if (!err) {
			/* Linked at 1G: record the downgraded mode. */
			np->flags &= ~NIU_FLAGS_10G;
			np->mac_xcvr = MAC_XCVR_PCS;
		}  else {
			dev_err(np->device, PFX "Port %u 10G/1G SERDES "
				"Link Failed \n", np->port);
			return -ENODEV;
		}
	}
	return 0;
}
659
660 static int esr_read_rxtx_ctrl(struct niu *np, unsigned long chan, u32 *val)
661 {
662         int err;
663
664         err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_L(chan));
665         if (err >= 0) {
666                 *val = (err & 0xffff);
667                 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
668                                 ESR_RXTX_CTRL_H(chan));
669                 if (err >= 0)
670                         *val |= ((err & 0xffff) << 16);
671                 err = 0;
672         }
673         return err;
674 }
675
676 static int esr_read_glue0(struct niu *np, unsigned long chan, u32 *val)
677 {
678         int err;
679
680         err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
681                         ESR_GLUE_CTRL0_L(chan));
682         if (err >= 0) {
683                 *val = (err & 0xffff);
684                 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
685                                 ESR_GLUE_CTRL0_H(chan));
686                 if (err >= 0) {
687                         *val |= ((err & 0xffff) << 16);
688                         err = 0;
689                 }
690         }
691         return err;
692 }
693
694 static int esr_read_reset(struct niu *np, u32 *val)
695 {
696         int err;
697
698         err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
699                         ESR_RXTX_RESET_CTRL_L);
700         if (err >= 0) {
701                 *val = (err & 0xffff);
702                 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
703                                 ESR_RXTX_RESET_CTRL_H);
704                 if (err >= 0) {
705                         *val |= ((err & 0xffff) << 16);
706                         err = 0;
707                 }
708         }
709         return err;
710 }
711
712 static int esr_write_rxtx_ctrl(struct niu *np, unsigned long chan, u32 val)
713 {
714         int err;
715
716         err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
717                          ESR_RXTX_CTRL_L(chan), val & 0xffff);
718         if (!err)
719                 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
720                                  ESR_RXTX_CTRL_H(chan), (val >> 16));
721         return err;
722 }
723
724 static int esr_write_glue0(struct niu *np, unsigned long chan, u32 val)
725 {
726         int err;
727
728         err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
729                         ESR_GLUE_CTRL0_L(chan), val & 0xffff);
730         if (!err)
731                 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
732                                  ESR_GLUE_CTRL0_H(chan), (val >> 16));
733         return err;
734 }
735
/* Pulse the ESR RX/TX reset controls and verify they deasserted.
 *
 * The sequence strobes the low and high reset-control halves in a
 * specific order with 200us settle times between writes, then reads
 * the combined reset register back and requires it to be zero.
 * Returns 0 on success, a negative mdio error on a failed write, or
 * -ENODEV if the reset bits did not clear.
 */
static int esr_reset(struct niu *np)
{
	u32 uninitialized_var(reset);
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_L, 0x0000);
	if (err)
		return err;
	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_H, 0xffff);
	if (err)
		return err;
	udelay(200);

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_L, 0xffff);
	if (err)
		return err;
	udelay(200);

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_H, 0x0000);
	if (err)
		return err;
	udelay(200);

	/* Both halves were written back to 0/0xffff/0 above; a nonzero
	 * readback means the reset logic is stuck. */
	err = esr_read_reset(np, &reset);
	if (err)
		return err;
	if (reset != 0) {
		dev_err(np->device, PFX "Port %u ESR_RESET "
			"did not clear [%08x]\n",
			np->port, reset);
		return -ENODEV;
	}

	return 0;
}
775
/* Bring up the on-chip ENET SERDES for 10G operation (non-NIU path).
 *
 * Sequence: program the per-port SERDES control and test-config
 * registers (enabling pad loopback when the link config asks for
 * LOOPBACK_PHY), tune RX/TX control and glue logic on all four
 * lanes, reset the ESR block, and finally check ESR_INT_SIGNALS for
 * the port's full set of ready/detect bits.  For hot-pluggable PHY
 * ports a missing signal just clears the PHY-present flag and
 * returns 0; otherwise missing signals are -ENODEV.  Returns
 * -EINVAL for ports other than 0/1.
 */
static int serdes_init_10g(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	unsigned long ctrl_reg, test_cfg_reg, i;
	u64 ctrl_val, test_cfg_val, sig, mask, val;
	int err;

	switch (np->port) {
	case 0:
		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
		break;
	case 1:
		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
		break;

	default:
		return -EINVAL;
	}
	/* Signal-detect on all lanes, plus emphasis and level
	 * adjustment values for each lane. */
	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
		    ENET_SERDES_CTRL_SDET_1 |
		    ENET_SERDES_CTRL_SDET_2 |
		    ENET_SERDES_CTRL_SDET_3 |
		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
	test_cfg_val = 0;

	if (lp->loopback_mode == LOOPBACK_PHY) {
		/* Pad loopback on all four lanes for PHY loopback mode. */
		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_0_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_1_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_2_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_3_SHIFT));
	}

	nw64(ctrl_reg, ctrl_val);
	nw64(test_cfg_reg, test_cfg_val);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		u32 rxtx_ctrl, glue0;

		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
		if (err)
			return err;
		err = esr_read_glue0(np, i, &glue0);
		if (err)
			return err;

		/* Enable stretch mode and set the low VMUX value. */
		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));

		/* Rewrite sample rate, threshold count and blanking
		 * time, and enable RX loss-of-signal detection. */
		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
			   ESR_GLUE_CTRL0_THCNT |
			   ESR_GLUE_CTRL0_BLTIME);
		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
			  (BLTIME_300_CYCLES <<
			   ESR_GLUE_CTRL0_BLTIME_SHIFT));

		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
		if (err)
			return err;
		err = esr_write_glue0(np, i, glue0);
		if (err)
			return err;
	}

	err = esr_reset(np);
	if (err)
		return err;

	/* Single read of the signal bits (no polling on this path). */
	sig = nr64(ESR_INT_SIGNALS);
	switch (np->port) {
	case 0:
		mask = ESR_INT_SIGNALS_P0_BITS;
		val = (ESR_INT_SRDY0_P0 |
		       ESR_INT_DET0_P0 |
		       ESR_INT_XSRDY_P0 |
		       ESR_INT_XDP_P0_CH3 |
		       ESR_INT_XDP_P0_CH2 |
		       ESR_INT_XDP_P0_CH1 |
		       ESR_INT_XDP_P0_CH0);
		break;

	case 1:
		mask = ESR_INT_SIGNALS_P1_BITS;
		val = (ESR_INT_SRDY0_P1 |
		       ESR_INT_DET0_P1 |
		       ESR_INT_XSRDY_P1 |
		       ESR_INT_XDP_P1_CH3 |
		       ESR_INT_XDP_P1_CH2 |
		       ESR_INT_XDP_P1_CH1 |
		       ESR_INT_XDP_P1_CH0);
		break;

	default:
		return -EINVAL;
	}

	if ((sig & mask) != val) {
		/* A hot-pluggable PHY that is absent is not an error;
		 * just record that it is not present. */
		if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
			np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
			return 0;
		}
		dev_err(np->device, PFX "Port %u signal bits [%08x] are not "
			"[%08x]\n", np->port, (int) (sig & mask), (int) val);
		return -ENODEV;
	}
	if (np->flags & NIU_FLAGS_HOTPLUG_PHY)
		np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
	return 0;
}
901
902 static int serdes_init_1g(struct niu *np)
903 {
904         u64 val;
905
906         val = nr64(ENET_SERDES_1_PLL_CFG);
907         val &= ~ENET_SERDES_PLL_FBDIV2;
908         switch (np->port) {
909         case 0:
910                 val |= ENET_SERDES_PLL_HRATE0;
911                 break;
912         case 1:
913                 val |= ENET_SERDES_PLL_HRATE1;
914                 break;
915         case 2:
916                 val |= ENET_SERDES_PLL_HRATE2;
917                 break;
918         case 3:
919                 val |= ENET_SERDES_PLL_HRATE3;
920                 break;
921         default:
922                 return -EINVAL;
923         }
924         nw64(ENET_SERDES_1_PLL_CFG, val);
925
926         return 0;
927 }
928
/* Bring up the 1G serdes for this port: program PLL, lane control and
 * test (loopback) registers, cycle the serdes reset, tune all four
 * lanes, then verify the per-port signal-ready/detect bits.
 */
static int serdes_init_1g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
	u64 ctrl_val, test_cfg_val, sig, mask, val;
	int err;
	u64 reset_val, val_rd;

	/* PLL config: HRATE bits for all four lanes plus the FBDIV0
	 * feedback-divider setting.
	 */
	val = ENET_SERDES_PLL_HRATE0 | ENET_SERDES_PLL_HRATE1 |
		ENET_SERDES_PLL_HRATE2 | ENET_SERDES_PLL_HRATE3 |
		ENET_SERDES_PLL_FBDIV0;
	/* Only ports 0 and 1 have a 1G serdes register block. */
	switch (np->port) {
	case 0:
		reset_val =  ENET_SERDES_RESET_0;
		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
		pll_cfg = ENET_SERDES_0_PLL_CFG;
		break;
	case 1:
		reset_val =  ENET_SERDES_RESET_1;
		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
		pll_cfg = ENET_SERDES_1_PLL_CFG;
		break;

	default:
		return -EINVAL;
	}
	/* Enable signal detect on all lanes; per-lane emphasis 0x5 and
	 * load adjust 0x1.
	 */
	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
		    ENET_SERDES_CTRL_SDET_1 |
		    ENET_SERDES_CTRL_SDET_2 |
		    ENET_SERDES_CTRL_SDET_3 |
		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
	test_cfg_val = 0;

	/* PHY loopback is implemented with per-lane pad loopback mode. */
	if (lp->loopback_mode == LOOPBACK_PHY) {
		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_0_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_1_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_2_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_3_SHIFT));
	}

	/* Assert reset, program the serdes while held in reset, then
	 * deassert and give the link time to come up.
	 */
	nw64(ENET_SERDES_RESET, reset_val);
	mdelay(20);
	val_rd = nr64(ENET_SERDES_RESET);
	val_rd &= ~reset_val;
	nw64(pll_cfg, val);
	nw64(ctrl_reg, ctrl_val);
	nw64(test_cfg_reg, test_cfg_val);
	nw64(ENET_SERDES_RESET, val_rd);
	mdelay(2000);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		u32 rxtx_ctrl, glue0;

		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
		if (err)
			return err;
		err = esr_read_glue0(np, i, &glue0);
		if (err)
			return err;

		/* Enable stretch mode, VMUXLO = 2. */
		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));

		/* RX LOS enabled; sample rate 0xf, threshold count 0xff,
		 * blanking time 300 cycles.
		 */
		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
			   ESR_GLUE_CTRL0_THCNT |
			   ESR_GLUE_CTRL0_BLTIME);
		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
			  (BLTIME_300_CYCLES <<
			   ESR_GLUE_CTRL0_BLTIME_SHIFT));

		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
		if (err)
			return err;
		err = esr_write_glue0(np, i, glue0);
		if (err)
			return err;
	}


	/* Verify the serdes came up: ready and detect bits must both be
	 * set for this port.
	 */
	sig = nr64(ESR_INT_SIGNALS);
	switch (np->port) {
	case 0:
		val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
		mask = val;
		break;

	case 1:
		val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
		mask = val;
		break;

	default:
		return -EINVAL;
	}

	if ((sig & mask) != val) {
		dev_err(np->device, PFX "Port %u signal bits [%08x] are not "
			"[%08x]\n", np->port, (int) (sig & mask), (int) val);
		return -ENODEV;
	}

	return 0;
}
1049
1050 static int link_status_1g_serdes(struct niu *np, int *link_up_p)
1051 {
1052         struct niu_link_config *lp = &np->link_config;
1053         int link_up;
1054         u64 val;
1055         u16 current_speed;
1056         unsigned long flags;
1057         u8 current_duplex;
1058
1059         link_up = 0;
1060         current_speed = SPEED_INVALID;
1061         current_duplex = DUPLEX_INVALID;
1062
1063         spin_lock_irqsave(&np->lock, flags);
1064
1065         val = nr64_pcs(PCS_MII_STAT);
1066
1067         if (val & PCS_MII_STAT_LINK_STATUS) {
1068                 link_up = 1;
1069                 current_speed = SPEED_1000;
1070                 current_duplex = DUPLEX_FULL;
1071         }
1072
1073         lp->active_speed = current_speed;
1074         lp->active_duplex = current_duplex;
1075         spin_unlock_irqrestore(&np->lock, flags);
1076
1077         *link_up_p = link_up;
1078         return 0;
1079 }
1080
1081 static int link_status_10g_serdes(struct niu *np, int *link_up_p)
1082 {
1083         unsigned long flags;
1084         struct niu_link_config *lp = &np->link_config;
1085         int link_up = 0;
1086         int link_ok = 1;
1087         u64 val, val2;
1088         u16 current_speed;
1089         u8 current_duplex;
1090
1091         if (!(np->flags & NIU_FLAGS_10G))
1092                 return link_status_1g_serdes(np, link_up_p);
1093
1094         current_speed = SPEED_INVALID;
1095         current_duplex = DUPLEX_INVALID;
1096         spin_lock_irqsave(&np->lock, flags);
1097
1098         val = nr64_xpcs(XPCS_STATUS(0));
1099         val2 = nr64_mac(XMAC_INTER2);
1100         if (val2 & 0x01000000)
1101                 link_ok = 0;
1102
1103         if ((val & 0x1000ULL) && link_ok) {
1104                 link_up = 1;
1105                 current_speed = SPEED_10000;
1106                 current_duplex = DUPLEX_FULL;
1107         }
1108         lp->active_speed = current_speed;
1109         lp->active_duplex = current_duplex;
1110         spin_unlock_irqrestore(&np->lock, flags);
1111         *link_up_p = link_up;
1112         return 0;
1113 }
1114
/* Decode standard MII registers into the driver's link_config state:
 * supported/advertised masks, negotiated (or forced) speed and duplex,
 * and the link-up indication from BMSR.  Caller holds np->lock.
 *
 * Returns 0 on success or a negative errno from a failed MDIO read.
 */
static int link_status_mii(struct niu *np, int *link_up_p)
{
	struct niu_link_config *lp = &np->link_config;
	int err;
	int bmsr, advert, ctrl1000, stat1000, lpa, bmcr, estatus;
	int supported, advertising, active_speed, active_duplex;

	err = mii_read(np, np->phy_addr, MII_BMCR);
	if (unlikely(err < 0))
		return err;
	bmcr = err;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (unlikely(err < 0))
		return err;
	bmsr = err;

	err = mii_read(np, np->phy_addr, MII_ADVERTISE);
	if (unlikely(err < 0))
		return err;
	advert = err;

	err = mii_read(np, np->phy_addr, MII_LPA);
	if (unlikely(err < 0))
		return err;
	lpa = err;

	/* Gigabit registers exist only when extended status is present. */
	if (likely(bmsr & BMSR_ESTATEN)) {
		err = mii_read(np, np->phy_addr, MII_ESTATUS);
		if (unlikely(err < 0))
			return err;
		estatus = err;

		err = mii_read(np, np->phy_addr, MII_CTRL1000);
		if (unlikely(err < 0))
			return err;
		ctrl1000 = err;

		err = mii_read(np, np->phy_addr, MII_STAT1000);
		if (unlikely(err < 0))
			return err;
		stat1000 = err;
	} else
		estatus = ctrl1000 = stat1000 = 0;

	/* Build the SUPPORTED_* mask from BMSR/ESTATUS capability bits. */
	supported = 0;
	if (bmsr & BMSR_ANEGCAPABLE)
		supported |= SUPPORTED_Autoneg;
	if (bmsr & BMSR_10HALF)
		supported |= SUPPORTED_10baseT_Half;
	if (bmsr & BMSR_10FULL)
		supported |= SUPPORTED_10baseT_Full;
	if (bmsr & BMSR_100HALF)
		supported |= SUPPORTED_100baseT_Half;
	if (bmsr & BMSR_100FULL)
		supported |= SUPPORTED_100baseT_Full;
	if (estatus & ESTATUS_1000_THALF)
		supported |= SUPPORTED_1000baseT_Half;
	if (estatus & ESTATUS_1000_TFULL)
		supported |= SUPPORTED_1000baseT_Full;
	lp->supported = supported;

	/* Build the ADVERTISED_* mask from what we put on the wire. */
	advertising = 0;
	if (advert & ADVERTISE_10HALF)
		advertising |= ADVERTISED_10baseT_Half;
	if (advert & ADVERTISE_10FULL)
		advertising |= ADVERTISED_10baseT_Full;
	if (advert & ADVERTISE_100HALF)
		advertising |= ADVERTISED_100baseT_Half;
	if (advert & ADVERTISE_100FULL)
		advertising |= ADVERTISED_100baseT_Full;
	if (ctrl1000 & ADVERTISE_1000HALF)
		advertising |= ADVERTISED_1000baseT_Half;
	if (ctrl1000 & ADVERTISE_1000FULL)
		advertising |= ADVERTISED_1000baseT_Full;

	if (bmcr & BMCR_ANENABLE) {
		int neg, neg1000;

		lp->active_autoneg = 1;
		advertising |= ADVERTISED_Autoneg;

		/* Intersect our advertisement with the partner's ability.
		 * CTRL1000 advertise bits line up with STAT1000 partner
		 * bits when shifted left by 2.
		 */
		neg = advert & lpa;
		neg1000 = (ctrl1000 << 2) & stat1000;

		if (neg1000 & (LPA_1000FULL | LPA_1000HALF))
			active_speed = SPEED_1000;
		else if (neg & LPA_100)
			active_speed = SPEED_100;
		else if (neg & (LPA_10HALF | LPA_10FULL))
			active_speed = SPEED_10;
		else
			active_speed = SPEED_INVALID;

		if ((neg1000 & LPA_1000FULL) || (neg & LPA_DUPLEX))
			active_duplex = DUPLEX_FULL;
		else if (active_speed != SPEED_INVALID)
			active_duplex = DUPLEX_HALF;
		else
			active_duplex = DUPLEX_INVALID;
	} else {
		/* Autoneg disabled: speed/duplex are forced via BMCR. */
		lp->active_autoneg = 0;

		if ((bmcr & BMCR_SPEED1000) && !(bmcr & BMCR_SPEED100))
			active_speed = SPEED_1000;
		else if (bmcr & BMCR_SPEED100)
			active_speed = SPEED_100;
		else
			active_speed = SPEED_10;

		if (bmcr & BMCR_FULLDPLX)
			active_duplex = DUPLEX_FULL;
		else
			active_duplex = DUPLEX_HALF;
	}

	lp->active_advertising = advertising;
	lp->active_speed = active_speed;
	lp->active_duplex = active_duplex;
	*link_up_p = !!(bmsr & BMSR_LSTATUS);

	return 0;
}
1238
1239 static int link_status_1g_rgmii(struct niu *np, int *link_up_p)
1240 {
1241         struct niu_link_config *lp = &np->link_config;
1242         u16 current_speed, bmsr;
1243         unsigned long flags;
1244         u8 current_duplex;
1245         int err, link_up;
1246
1247         link_up = 0;
1248         current_speed = SPEED_INVALID;
1249         current_duplex = DUPLEX_INVALID;
1250
1251         spin_lock_irqsave(&np->lock, flags);
1252
1253         err = -EINVAL;
1254
1255         err = mii_read(np, np->phy_addr, MII_BMSR);
1256         if (err < 0)
1257                 goto out;
1258
1259         bmsr = err;
1260         if (bmsr & BMSR_LSTATUS) {
1261                 u16 adv, lpa, common, estat;
1262
1263                 err = mii_read(np, np->phy_addr, MII_ADVERTISE);
1264                 if (err < 0)
1265                         goto out;
1266                 adv = err;
1267
1268                 err = mii_read(np, np->phy_addr, MII_LPA);
1269                 if (err < 0)
1270                         goto out;
1271                 lpa = err;
1272
1273                 common = adv & lpa;
1274
1275                 err = mii_read(np, np->phy_addr, MII_ESTATUS);
1276                 if (err < 0)
1277                         goto out;
1278                 estat = err;
1279                 link_up = 1;
1280                 current_speed = SPEED_1000;
1281                 current_duplex = DUPLEX_FULL;
1282
1283         }
1284         lp->active_speed = current_speed;
1285         lp->active_duplex = current_duplex;
1286         err = 0;
1287
1288 out:
1289         spin_unlock_irqrestore(&np->lock, flags);
1290
1291         *link_up_p = link_up;
1292         return err;
1293 }
1294
1295 static int link_status_1g(struct niu *np, int *link_up_p)
1296 {
1297         struct niu_link_config *lp = &np->link_config;
1298         unsigned long flags;
1299         int err;
1300
1301         spin_lock_irqsave(&np->lock, flags);
1302
1303         err = link_status_mii(np, link_up_p);
1304         lp->supported |= SUPPORTED_TP;
1305         lp->active_advertising |= ADVERTISED_TP;
1306
1307         spin_unlock_irqrestore(&np->lock, flags);
1308         return err;
1309 }
1310
1311 static int bcm8704_reset(struct niu *np)
1312 {
1313         int err, limit;
1314
1315         err = mdio_read(np, np->phy_addr,
1316                         BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
1317         if (err < 0 || err == 0xffff)
1318                 return err;
1319         err |= BMCR_RESET;
1320         err = mdio_write(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
1321                          MII_BMCR, err);
1322         if (err)
1323                 return err;
1324
1325         limit = 1000;
1326         while (--limit >= 0) {
1327                 err = mdio_read(np, np->phy_addr,
1328                                 BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
1329                 if (err < 0)
1330                         return err;
1331                 if (!(err & BMCR_RESET))
1332                         break;
1333         }
1334         if (limit < 0) {
1335                 dev_err(np->device, PFX "Port %u PHY will not reset "
1336                         "(bmcr=%04x)\n", np->port, (err & 0xffff));
1337                 return -ENODEV;
1338         }
1339         return 0;
1340 }
1341
1342 /* When written, certain PHY registers need to be read back twice
1343  * in order for the bits to settle properly.
1344  */
1345 static int bcm8704_user_dev3_readback(struct niu *np, int reg)
1346 {
1347         int err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
1348         if (err < 0)
1349                 return err;
1350         err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
1351         if (err < 0)
1352                 return err;
1353         return 0;
1354 }
1355
1356 static int bcm8706_init_user_dev3(struct niu *np)
1357 {
1358         int err;
1359
1360
1361         err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
1362                         BCM8704_USER_OPT_DIGITAL_CTRL);
1363         if (err < 0)
1364                 return err;
1365         err &= ~USER_ODIG_CTRL_GPIOS;
1366         err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
1367         err |=  USER_ODIG_CTRL_RESV2;
1368         err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
1369                          BCM8704_USER_OPT_DIGITAL_CTRL, err);
1370         if (err)
1371                 return err;
1372
1373         mdelay(1000);
1374
1375         return 0;
1376 }
1377
/* Program the BCM8704 user-area (device 3) registers: alarm/flag
 * polarity levels, PMD TX clock/DAC control, and the optical digital
 * control GPIO field.  Writes are read back twice (see
 * bcm8704_user_dev3_readback) so the bits settle.
 */
static int bcm8704_init_user_dev3(struct niu *np)
{
	int err;

	/* Alarm and flag polarity configuration plus reserved field. */
	err = mdio_write(np, np->phy_addr,
			 BCM8704_USER_DEV3_ADDR, BCM8704_USER_CONTROL,
			 (USER_CONTROL_OPTXRST_LVL |
			  USER_CONTROL_OPBIASFLT_LVL |
			  USER_CONTROL_OBTMPFLT_LVL |
			  USER_CONTROL_OPPRFLT_LVL |
			  USER_CONTROL_OPTXFLT_LVL |
			  USER_CONTROL_OPRXLOS_LVL |
			  USER_CONTROL_OPRXFLT_LVL |
			  USER_CONTROL_OPTXON_LVL |
			  (0x3f << USER_CONTROL_RES1_SHIFT)));
	if (err)
		return err;

	/* PMD transmit control: XFP clock enable, TXD/TXCK DAC values,
	 * and TSCK low-power enable.
	 */
	err = mdio_write(np, np->phy_addr,
			 BCM8704_USER_DEV3_ADDR, BCM8704_USER_PMD_TX_CONTROL,
			 (USER_PMD_TX_CTL_XFP_CLKEN |
			  (1 << USER_PMD_TX_CTL_TX_DAC_TXD_SH) |
			  (2 << USER_PMD_TX_CTL_TX_DAC_TXCK_SH) |
			  USER_PMD_TX_CTL_TSCK_LPWREN));
	if (err)
		return err;

	err = bcm8704_user_dev3_readback(np, BCM8704_USER_CONTROL);
	if (err)
		return err;
	err = bcm8704_user_dev3_readback(np, BCM8704_USER_PMD_TX_CONTROL);
	if (err)
		return err;

	/* Set the GPIO field of the optical digital control register. */
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_OPT_DIGITAL_CTRL);
	if (err < 0)
		return err;
	err &= ~USER_ODIG_CTRL_GPIOS;
	err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
	err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			 BCM8704_USER_OPT_DIGITAL_CTRL, err);
	if (err)
		return err;

	/* Allow the PHY to settle before continuing. */
	mdelay(1000);

	return 0;
}
1427
1428 static int mrvl88x2011_act_led(struct niu *np, int val)
1429 {
1430         int     err;
1431
1432         err  = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
1433                 MRVL88X2011_LED_8_TO_11_CTL);
1434         if (err < 0)
1435                 return err;
1436
1437         err &= ~MRVL88X2011_LED(MRVL88X2011_LED_ACT,MRVL88X2011_LED_CTL_MASK);
1438         err |=  MRVL88X2011_LED(MRVL88X2011_LED_ACT,val);
1439
1440         return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
1441                           MRVL88X2011_LED_8_TO_11_CTL, err);
1442 }
1443
1444 static int mrvl88x2011_led_blink_rate(struct niu *np, int rate)
1445 {
1446         int     err;
1447
1448         err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
1449                         MRVL88X2011_LED_BLINK_CTL);
1450         if (err >= 0) {
1451                 err &= ~MRVL88X2011_LED_BLKRATE_MASK;
1452                 err |= (rate << 4);
1453
1454                 err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
1455                                  MRVL88X2011_LED_BLINK_CTL, err);
1456         }
1457
1458         return err;
1459 }
1460
/* Bring up a Marvell 88X2011 10G transceiver: LED behavior, XFP
 * reference clock, optional MAC loopback, and PMD transmit enable.
 *
 * Returns 0 on success or a negative errno from a failed MDIO access.
 */
static int xcvr_init_10g_mrvl88x2011(struct niu *np)
{
	int	err;

	/* Set LED functions */
	err = mrvl88x2011_led_blink_rate(np, MRVL88X2011_LED_BLKRATE_134MS);
	if (err)
		return err;

	/* led activity */
	err = mrvl88x2011_act_led(np, MRVL88X2011_LED_CTL_OFF);
	if (err)
		return err;

	/* Enable the XFP reference clock. */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			MRVL88X2011_GENERAL_CTL);
	if (err < 0)
		return err;

	err |= MRVL88X2011_ENA_XFPREFCLK;

	err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			 MRVL88X2011_GENERAL_CTL, err);
	if (err < 0)
		return err;

	/* Enable or disable PHY-level loopback per the configured mode. */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			MRVL88X2011_PMA_PMD_CTL_1);
	if (err < 0)
		return err;

	if (np->link_config.loopback_mode == LOOPBACK_MAC)
		err |= MRVL88X2011_LOOPBACK;
	else
		err &= ~MRVL88X2011_LOOPBACK;

	err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			 MRVL88X2011_PMA_PMD_CTL_1, err);
	if (err < 0)
		return err;

	/* Enable PMD  */
	return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			  MRVL88X2011_10G_PMD_TX_DIS, MRVL88X2011_ENA_PMDTX);
}
1506
1507
/* Dump a few BCM870x diagnostic registers and warn about two known-bad
 * analog status signatures (cable / optical module problems).  The
 * status registers are read twice because the first read may return
 * stale values (same rationale as bcm8704_user_dev3_readback).
 */
static int xcvr_diag_bcm870x(struct niu *np)
{
	u16 analog_stat0, tx_alarm_status;
	int err = 0;

#if 1
	/* Debug dump of a few PHY registers; kept enabled on purpose. */
	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
			MII_STAT1000);
	if (err < 0)
		return err;
	pr_info(PFX "Port %u PMA_PMD(MII_STAT1000) [%04x]\n",
		np->port, err);

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 0x20);
	if (err < 0)
		return err;
	pr_info(PFX "Port %u USER_DEV3(0x20) [%04x]\n",
		np->port, err);

	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			MII_NWAYTEST);
	if (err < 0)
		return err;
	pr_info(PFX "Port %u PHYXS(MII_NWAYTEST) [%04x]\n",
		np->port, err);
#endif

	/* XXX dig this out it might not be so useful XXX */
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_ANALOG_STATUS0);
	if (err < 0)
		return err;
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_ANALOG_STATUS0);
	if (err < 0)
		return err;
	analog_stat0 = err;

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_TX_ALARM_STATUS);
	if (err < 0)
		return err;
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_TX_ALARM_STATUS);
	if (err < 0)
		return err;
	tx_alarm_status = err;

	/* 0x03fc is the expected healthy signature; the two values below
	 * are known failure patterns.
	 */
	if (analog_stat0 != 0x03fc) {
		if ((analog_stat0 == 0x43bc) && (tx_alarm_status != 0)) {
			pr_info(PFX "Port %u cable not connected "
				"or bad cable.\n", np->port);
		} else if (analog_stat0 == 0x639c) {
			pr_info(PFX "Port %u optical module is bad "
				"or missing.\n", np->port);
		}
	}

	return 0;
}
1568
1569 static int xcvr_10g_set_lb_bcm870x(struct niu *np)
1570 {
1571         struct niu_link_config *lp = &np->link_config;
1572         int err;
1573
1574         err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
1575                         MII_BMCR);
1576         if (err < 0)
1577                 return err;
1578
1579         err &= ~BMCR_LOOPBACK;
1580
1581         if (lp->loopback_mode == LOOPBACK_MAC)
1582                 err |= BMCR_LOOPBACK;
1583
1584         err = mdio_write(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
1585                          MII_BMCR, err);
1586         if (err)
1587                 return err;
1588
1589         return 0;
1590 }
1591
1592 static int xcvr_init_10g_bcm8706(struct niu *np)
1593 {
1594         int err = 0;
1595         u64 val;
1596
1597         if ((np->flags & NIU_FLAGS_HOTPLUG_PHY) &&
1598             (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) == 0)
1599                         return err;
1600
1601         val = nr64_mac(XMAC_CONFIG);
1602         val &= ~XMAC_CONFIG_LED_POLARITY;
1603         val |= XMAC_CONFIG_FORCE_LED_ON;
1604         nw64_mac(XMAC_CONFIG, val);
1605
1606         val = nr64(MIF_CONFIG);
1607         val |= MIF_CONFIG_INDIRECT_MODE;
1608         nw64(MIF_CONFIG, val);
1609
1610         err = bcm8704_reset(np);
1611         if (err)
1612                 return err;
1613
1614         err = xcvr_10g_set_lb_bcm870x(np);
1615         if (err)
1616                 return err;
1617
1618         err = bcm8706_init_user_dev3(np);
1619         if (err)
1620                 return err;
1621
1622         err = xcvr_diag_bcm870x(np);
1623         if (err)
1624                 return err;
1625
1626         return 0;
1627 }
1628
/* Bring up a BCM8704 10G transceiver: reset, user-device-3 setup,
 * loopback configuration, then the diagnostics dump.
 */
static int xcvr_init_10g_bcm8704(struct niu *np)
{
	int err;

	err = bcm8704_reset(np);
	if (!err)
		err = bcm8704_init_user_dev3(np);
	if (!err)
		err = xcvr_10g_set_lb_bcm870x(np);
	if (!err)
		err = xcvr_diag_bcm870x(np);

	return err;
}
1651
1652 static int xcvr_init_10g(struct niu *np)
1653 {
1654         int phy_id, err;
1655         u64 val;
1656
1657         val = nr64_mac(XMAC_CONFIG);
1658         val &= ~XMAC_CONFIG_LED_POLARITY;
1659         val |= XMAC_CONFIG_FORCE_LED_ON;
1660         nw64_mac(XMAC_CONFIG, val);
1661
1662         /* XXX shared resource, lock parent XXX */
1663         val = nr64(MIF_CONFIG);
1664         val |= MIF_CONFIG_INDIRECT_MODE;
1665         nw64(MIF_CONFIG, val);
1666
1667         phy_id = phy_decode(np->parent->port_phy, np->port);
1668         phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];
1669
1670         /* handle different phy types */
1671         switch (phy_id & NIU_PHY_ID_MASK) {
1672         case NIU_PHY_ID_MRVL88X2011:
1673                 err = xcvr_init_10g_mrvl88x2011(np);
1674                 break;
1675
1676         default: /* bcom 8704 */
1677                 err = xcvr_init_10g_bcm8704(np);
1678                 break;
1679         }
1680
1681         return 0;
1682 }
1683
1684 static int mii_reset(struct niu *np)
1685 {
1686         int limit, err;
1687
1688         err = mii_write(np, np->phy_addr, MII_BMCR, BMCR_RESET);
1689         if (err)
1690                 return err;
1691
1692         limit = 1000;
1693         while (--limit >= 0) {
1694                 udelay(500);
1695                 err = mii_read(np, np->phy_addr, MII_BMCR);
1696                 if (err < 0)
1697                         return err;
1698                 if (!(err & BMCR_RESET))
1699                         break;
1700         }
1701         if (limit < 0) {
1702                 dev_err(np->device, PFX "Port %u MII would not reset, "
1703                         "bmcr[%04x]\n", np->port, err);
1704                 return -ENODEV;
1705         }
1706
1707         return 0;
1708 }
1709
1710 static int xcvr_init_1g_rgmii(struct niu *np)
1711 {
1712         int err;
1713         u64 val;
1714         u16 bmcr, bmsr, estat;
1715
1716         val = nr64(MIF_CONFIG);
1717         val &= ~MIF_CONFIG_INDIRECT_MODE;
1718         nw64(MIF_CONFIG, val);
1719
1720         err = mii_reset(np);
1721         if (err)
1722                 return err;
1723
1724         err = mii_read(np, np->phy_addr, MII_BMSR);
1725         if (err < 0)
1726                 return err;
1727         bmsr = err;
1728
1729         estat = 0;
1730         if (bmsr & BMSR_ESTATEN) {
1731                 err = mii_read(np, np->phy_addr, MII_ESTATUS);
1732                 if (err < 0)
1733                         return err;
1734                 estat = err;
1735         }
1736
1737         bmcr = 0;
1738         err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
1739         if (err)
1740                 return err;
1741
1742         if (bmsr & BMSR_ESTATEN) {
1743                 u16 ctrl1000 = 0;
1744
1745                 if (estat & ESTATUS_1000_TFULL)
1746                         ctrl1000 |= ADVERTISE_1000FULL;
1747                 err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000);
1748                 if (err)
1749                         return err;
1750         }
1751
1752         bmcr = (BMCR_SPEED1000 | BMCR_FULLDPLX);
1753
1754         err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
1755         if (err)
1756                 return err;
1757
1758         err = mii_read(np, np->phy_addr, MII_BMCR);
1759         if (err < 0)
1760                 return err;
1761         bmcr = mii_read(np, np->phy_addr, MII_BMCR);
1762
1763         err = mii_read(np, np->phy_addr, MII_BMSR);
1764         if (err < 0)
1765                 return err;
1766
1767         return 0;
1768 }
1769
1770 static int mii_init_common(struct niu *np)
1771 {
1772         struct niu_link_config *lp = &np->link_config;
1773         u16 bmcr, bmsr, adv, estat;
1774         int err;
1775
1776         err = mii_reset(np);
1777         if (err)
1778                 return err;
1779
1780         err = mii_read(np, np->phy_addr, MII_BMSR);
1781         if (err < 0)
1782                 return err;
1783         bmsr = err;
1784
1785         estat = 0;
1786         if (bmsr & BMSR_ESTATEN) {
1787                 err = mii_read(np, np->phy_addr, MII_ESTATUS);
1788                 if (err < 0)
1789                         return err;
1790                 estat = err;
1791         }
1792
1793         bmcr = 0;
1794         err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
1795         if (err)
1796                 return err;
1797
1798         if (lp->loopback_mode == LOOPBACK_MAC) {
1799                 bmcr |= BMCR_LOOPBACK;
1800                 if (lp->active_speed == SPEED_1000)
1801                         bmcr |= BMCR_SPEED1000;
1802                 if (lp->active_duplex == DUPLEX_FULL)
1803                         bmcr |= BMCR_FULLDPLX;
1804         }
1805
1806         if (lp->loopback_mode == LOOPBACK_PHY) {
1807                 u16 aux;
1808
1809                 aux = (BCM5464R_AUX_CTL_EXT_LB |
1810                        BCM5464R_AUX_CTL_WRITE_1);
1811                 err = mii_write(np, np->phy_addr, BCM5464R_AUX_CTL, aux);
1812                 if (err)
1813                         return err;
1814         }
1815
1816         if (lp->autoneg) {
1817                 u16 ctrl1000;
1818
1819                 adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1820                 if ((bmsr & BMSR_10HALF) &&
1821                         (lp->advertising & ADVERTISED_10baseT_Half))
1822                         adv |= ADVERTISE_10HALF;
1823                 if ((bmsr & BMSR_10FULL) &&
1824                         (lp->advertising & ADVERTISED_10baseT_Full))
1825                         adv |= ADVERTISE_10FULL;
1826                 if ((bmsr & BMSR_100HALF) &&
1827                         (lp->advertising & ADVERTISED_100baseT_Half))
1828                         adv |= ADVERTISE_100HALF;
1829                 if ((bmsr & BMSR_100FULL) &&
1830                         (lp->advertising & ADVERTISED_100baseT_Full))
1831                         adv |= ADVERTISE_100FULL;
1832                 err = mii_write(np, np->phy_addr, MII_ADVERTISE, adv);
1833                 if (err)
1834                         return err;
1835
1836                 if (likely(bmsr & BMSR_ESTATEN)) {
1837                         ctrl1000 = 0;
1838                         if ((estat & ESTATUS_1000_THALF) &&
1839                                 (lp->advertising & ADVERTISED_1000baseT_Half))
1840                                 ctrl1000 |= ADVERTISE_1000HALF;
1841                         if ((estat & ESTATUS_1000_TFULL) &&
1842                                 (lp->advertising & ADVERTISED_1000baseT_Full))
1843                                 ctrl1000 |= ADVERTISE_1000FULL;
1844                         err = mii_write(np, np->phy_addr,
1845                                         MII_CTRL1000, ctrl1000);
1846                         if (err)
1847                                 return err;
1848                 }
1849
1850                 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
1851         } else {
1852                 /* !lp->autoneg */
1853                 int fulldpx;
1854
1855                 if (lp->duplex == DUPLEX_FULL) {
1856                         bmcr |= BMCR_FULLDPLX;
1857                         fulldpx = 1;
1858                 } else if (lp->duplex == DUPLEX_HALF)
1859                         fulldpx = 0;
1860                 else
1861                         return -EINVAL;
1862
1863                 if (lp->speed == SPEED_1000) {
1864                         /* if X-full requested while not supported, or
1865                            X-half requested while not supported... */
1866                         if ((fulldpx && !(estat & ESTATUS_1000_TFULL)) ||
1867                                 (!fulldpx && !(estat & ESTATUS_1000_THALF)))
1868                                 return -EINVAL;
1869                         bmcr |= BMCR_SPEED1000;
1870                 } else if (lp->speed == SPEED_100) {
1871                         if ((fulldpx && !(bmsr & BMSR_100FULL)) ||
1872                                 (!fulldpx && !(bmsr & BMSR_100HALF)))
1873                                 return -EINVAL;
1874                         bmcr |= BMCR_SPEED100;
1875                 } else if (lp->speed == SPEED_10) {
1876                         if ((fulldpx && !(bmsr & BMSR_10FULL)) ||
1877                                 (!fulldpx && !(bmsr & BMSR_10HALF)))
1878                                 return -EINVAL;
1879                 } else
1880                         return -EINVAL;
1881         }
1882
1883         err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
1884         if (err)
1885                 return err;
1886
1887 #if 0
1888         err = mii_read(np, np->phy_addr, MII_BMCR);
1889         if (err < 0)
1890                 return err;
1891         bmcr = err;
1892
1893         err = mii_read(np, np->phy_addr, MII_BMSR);
1894         if (err < 0)
1895                 return err;
1896         bmsr = err;
1897
1898         pr_info(PFX "Port %u after MII init bmcr[%04x] bmsr[%04x]\n",
1899                 np->port, bmcr, bmsr);
1900 #endif
1901
1902         return 0;
1903 }
1904
1905 static int xcvr_init_1g(struct niu *np)
1906 {
1907         u64 val;
1908
1909         /* XXX shared resource, lock parent XXX */
1910         val = nr64(MIF_CONFIG);
1911         val &= ~MIF_CONFIG_INDIRECT_MODE;
1912         nw64(MIF_CONFIG, val);
1913
1914         return mii_init_common(np);
1915 }
1916
1917 static int niu_xcvr_init(struct niu *np)
1918 {
1919         const struct niu_phy_ops *ops = np->phy_ops;
1920         int err;
1921
1922         err = 0;
1923         if (ops->xcvr_init)
1924                 err = ops->xcvr_init(np);
1925
1926         return err;
1927 }
1928
1929 static int niu_serdes_init(struct niu *np)
1930 {
1931         const struct niu_phy_ops *ops = np->phy_ops;
1932         int err;
1933
1934         err = 0;
1935         if (ops->serdes_init)
1936                 err = ops->serdes_init(np);
1937
1938         return err;
1939 }
1940
1941 static void niu_init_xif(struct niu *);
1942 static void niu_handle_led(struct niu *, int status);
1943
1944 static int niu_link_status_common(struct niu *np, int link_up)
1945 {
1946         struct niu_link_config *lp = &np->link_config;
1947         struct net_device *dev = np->dev;
1948         unsigned long flags;
1949
1950         if (!netif_carrier_ok(dev) && link_up) {
1951                 niuinfo(LINK, "%s: Link is up at %s, %s duplex\n",
1952                        dev->name,
1953                        (lp->active_speed == SPEED_10000 ?
1954                         "10Gb/sec" :
1955                         (lp->active_speed == SPEED_1000 ?
1956                          "1Gb/sec" :
1957                          (lp->active_speed == SPEED_100 ?
1958                           "100Mbit/sec" : "10Mbit/sec"))),
1959                        (lp->active_duplex == DUPLEX_FULL ?
1960                         "full" : "half"));
1961
1962                 spin_lock_irqsave(&np->lock, flags);
1963                 niu_init_xif(np);
1964                 niu_handle_led(np, 1);
1965                 spin_unlock_irqrestore(&np->lock, flags);
1966
1967                 netif_carrier_on(dev);
1968         } else if (netif_carrier_ok(dev) && !link_up) {
1969                 niuwarn(LINK, "%s: Link is down\n", dev->name);
1970                 spin_lock_irqsave(&np->lock, flags);
1971                 niu_handle_led(np, 0);
1972                 spin_unlock_irqrestore(&np->lock, flags);
1973                 netif_carrier_off(dev);
1974         }
1975
1976         return 0;
1977 }
1978
/* Poll a Marvell 88X2011 PHY for 10G link state.
 *
 * The link is reported up only when the PMA/PMD status, the PCS
 * status (read twice per the "read twice" note below) and the XGXS
 * lane status all indicate a healthy link.  The PHY activity LED is
 * driven to match the result.  *link_up_p receives 1/0; returns 0 on
 * success or a negative errno from a failed MDIO access.
 */
static int link_status_10g_mrvl(struct niu *np, int *link_up_p)
{
	int err, link_up, pma_status, pcs_status;

	link_up = 0;

	/* Initial read of the PMD status register; the value is
	 * discarded — presumably this clears latched status bits
	 * before the checks below (TODO: confirm against the
	 * 88X2011 datasheet).
	 */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			MRVL88X2011_10G_PMD_STATUS_2);
	if (err < 0)
		goto out;

	/* Check PMA/PMD Register: 1.0001.2 == 1 */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			MRVL88X2011_PMA_PMD_STATUS_1);
	if (err < 0)
		goto out;

	pma_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);

	/* Check PMC Register : 3.0001.2 == 1: read twice */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			MRVL88X2011_PMA_PMD_STATUS_1);
	if (err < 0)
		goto out;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			MRVL88X2011_PMA_PMD_STATUS_1);
	if (err < 0)
		goto out;

	pcs_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);

	/* Check XGXS Register : 4.0018.[0-3,12] */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV4_ADDR,
			MRVL88X2011_10G_XGXS_LANE_STAT);
	if (err < 0)
		goto out;

	/* Exact-match: all four lanes synced plus the MAGIC bit and
	 * bit 11 (0x800 — raw mask with no symbolic name here;
	 * meaning to be confirmed from the datasheet).
	 */
	if (err == (PHYXS_XGXS_LANE_STAT_ALINGED | PHYXS_XGXS_LANE_STAT_LANE3 |
		    PHYXS_XGXS_LANE_STAT_LANE2 | PHYXS_XGXS_LANE_STAT_LANE1 |
		    PHYXS_XGXS_LANE_STAT_LANE0 | PHYXS_XGXS_LANE_STAT_MAGIC |
		    0x800))
		link_up = (pma_status && pcs_status) ? 1 : 0;

	/* This path always reports fixed 10G full-duplex, regardless
	 * of whether the link is up.
	 */
	np->link_config.active_speed = SPEED_10000;
	np->link_config.active_duplex = DUPLEX_FULL;
	err = 0;
out:
	/* LED tracks the final link verdict even on MDIO error. */
	mrvl88x2011_act_led(np, (link_up ?
				 MRVL88X2011_LED_CTL_PCS_ACT :
				 MRVL88X2011_LED_CTL_OFF));

	*link_up_p = link_up;
	return err;
}
2034
/* Poll a Broadcom BCM8706 PHY for 10G link state.
 *
 * Requires, in order: global receive signal detect, PCS 10G-R block
 * lock, and an exact XGXS lane-status match (all four lanes plus the
 * pattern-test bit).  *link_up_p receives 1/0.
 *
 * Return value: 0 on a clean poll, a negative errno on MDIO failure,
 * or the raw value 0xffff when the very first MDIO read returns all
 * ones (no device responding) — the hotplug caller relies on seeing
 * that 0xffff to detect a back-to-back XAUI setup with no PHY.
 */
static int link_status_10g_bcm8706(struct niu *np, int *link_up_p)
{
	int err, link_up;
	link_up = 0;

	/* 0xffff (all ones) means nothing answered on the bus;
	 * fall through to out and return it to the caller.
	 */
	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
			BCM8704_PMD_RCV_SIGDET);
	if (err < 0 || err == 0xffff)
		goto out;
	if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
		err = 0;
		goto out;
	}

	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
			BCM8704_PCS_10G_R_STATUS);
	if (err < 0)
		goto out;

	if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
		err = 0;
		goto out;
	}

	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			BCM8704_PHYXS_XGXS_LANE_STAT);
	if (err < 0)
		goto out;
	/* Unlike the plain BCM8704 path below, this check also
	 * requires the PATTEST bit to be set.  On mismatch the
	 * advertised speed/duplex are invalidated.
	 */
	if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
		    PHYXS_XGXS_LANE_STAT_MAGIC |
		    PHYXS_XGXS_LANE_STAT_PATTEST |
		    PHYXS_XGXS_LANE_STAT_LANE3 |
		    PHYXS_XGXS_LANE_STAT_LANE2 |
		    PHYXS_XGXS_LANE_STAT_LANE1 |
		    PHYXS_XGXS_LANE_STAT_LANE0)) {
		err = 0;
		np->link_config.active_speed = SPEED_INVALID;
		np->link_config.active_duplex = DUPLEX_INVALID;
		goto out;
	}

	link_up = 1;
	np->link_config.active_speed = SPEED_10000;
	np->link_config.active_duplex = DUPLEX_FULL;
	err = 0;

out:
	*link_up_p = link_up;
	return err;
}
2085
/* Poll a Broadcom BCM8704 PHY for 10G link state.
 *
 * Requires, in order: global receive signal detect, PCS 10G-R block
 * lock, and an exact XGXS lane-status match for all four lanes.
 * *link_up_p receives 1/0; returns 0 on a clean poll or a negative
 * errno from a failed MDIO access.
 */
static int link_status_10g_bcom(struct niu *np, int *link_up_p)
{
	int err, link_up;

	link_up = 0;

	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
			BCM8704_PMD_RCV_SIGDET);
	if (err < 0)
		goto out;
	if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
		err = 0;
		goto out;
	}

	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
			BCM8704_PCS_10G_R_STATUS);
	if (err < 0)
		goto out;
	if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
		err = 0;
		goto out;
	}

	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			BCM8704_PHYXS_XGXS_LANE_STAT);
	if (err < 0)
		goto out;

	/* Exact match: alignment, MAGIC bit and all four lanes.
	 * Note the BCM8706 variant additionally checks PATTEST.
	 */
	if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
		    PHYXS_XGXS_LANE_STAT_MAGIC |
		    PHYXS_XGXS_LANE_STAT_LANE3 |
		    PHYXS_XGXS_LANE_STAT_LANE2 |
		    PHYXS_XGXS_LANE_STAT_LANE1 |
		    PHYXS_XGXS_LANE_STAT_LANE0)) {
		err = 0;
		goto out;
	}

	link_up = 1;
	np->link_config.active_speed = SPEED_10000;
	np->link_config.active_duplex = DUPLEX_FULL;
	err = 0;

out:
	*link_up_p = link_up;
	return err;
}
2134
2135 static int link_status_10g(struct niu *np, int *link_up_p)
2136 {
2137         unsigned long flags;
2138         int err = -EINVAL;
2139
2140         spin_lock_irqsave(&np->lock, flags);
2141
2142         if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
2143                 int phy_id;
2144
2145                 phy_id = phy_decode(np->parent->port_phy, np->port);
2146                 phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];
2147
2148                 /* handle different phy types */
2149                 switch (phy_id & NIU_PHY_ID_MASK) {
2150                 case NIU_PHY_ID_MRVL88X2011:
2151                         err = link_status_10g_mrvl(np, link_up_p);
2152                         break;
2153
2154                 default: /* bcom 8704 */
2155                         err = link_status_10g_bcom(np, link_up_p);
2156                         break;
2157                 }
2158         }
2159
2160         spin_unlock_irqrestore(&np->lock, flags);
2161
2162         return err;
2163 }
2164
2165 static int niu_10g_phy_present(struct niu *np)
2166 {
2167         u64 sig, mask, val;
2168
2169         sig = nr64(ESR_INT_SIGNALS);
2170         switch (np->port) {
2171         case 0:
2172                 mask = ESR_INT_SIGNALS_P0_BITS;
2173                 val = (ESR_INT_SRDY0_P0 |
2174                        ESR_INT_DET0_P0 |
2175                        ESR_INT_XSRDY_P0 |
2176                        ESR_INT_XDP_P0_CH3 |
2177                        ESR_INT_XDP_P0_CH2 |
2178                        ESR_INT_XDP_P0_CH1 |
2179                        ESR_INT_XDP_P0_CH0);
2180                 break;
2181
2182         case 1:
2183                 mask = ESR_INT_SIGNALS_P1_BITS;
2184                 val = (ESR_INT_SRDY0_P1 |
2185                        ESR_INT_DET0_P1 |
2186                        ESR_INT_XSRDY_P1 |
2187                        ESR_INT_XDP_P1_CH3 |
2188                        ESR_INT_XDP_P1_CH2 |
2189                        ESR_INT_XDP_P1_CH1 |
2190                        ESR_INT_XDP_P1_CH0);
2191                 break;
2192
2193         default:
2194                 return 0;
2195         }
2196
2197         if ((sig & mask) != val)
2198                 return 0;
2199         return 1;
2200 }
2201
/* Poll link state on a hotplug-capable 10G port.
 *
 * Detects insertion/removal of the PHY module by comparing the ESR
 * presence signals against the cached NIU_FLAGS_HOTPLUG_PHY_PRESENT
 * flag, re-initializes a newly inserted transceiver, and then polls
 * the BCM8706 for link state if a PHY is (still) considered present.
 *
 * NOTE(review): `err` is reused both as a negative-errno and as a
 * raw MDIO register value — the 0xffff comparisons below deliberately
 * treat an all-ones read as "no MDIO device, back-to-back XAUI".
 * The function itself always returns 0.
 */
static int link_status_10g_hotplug(struct niu *np, int *link_up_p)
{
	unsigned long flags;
	int err = 0;
	int phy_present;
	int phy_present_prev;

	spin_lock_irqsave(&np->lock, flags);

	if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
		phy_present_prev = (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) ?
			1 : 0;
		phy_present = niu_10g_phy_present(np);
		if (phy_present != phy_present_prev) {
			/* state change */
			if (phy_present) {
				/* A NEM was just plugged in */
				np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
				if (np->phy_ops->xcvr_init)
					err = np->phy_ops->xcvr_init(np);
				if (err) {
					/* Init failed: probe BMCR to tell a
					 * missing MDIO device apart from a
					 * transient failure.
					 */
					err = mdio_read(np, np->phy_addr,
						BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
					if (err == 0xffff) {
						/* No mdio, back-to-back XAUI */
						goto out;
					}
					/* debounce */
					np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
				}
			} else {
				np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
				*link_up_p = 0;
				niuwarn(LINK, "%s: Hotplug PHY Removed\n",
					np->dev->name);
			}
		}
out:
		if (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) {
			err = link_status_10g_bcm8706(np, link_up_p);
			if (err == 0xffff) {
				/* No mdio, back-to-back XAUI: it is C10NEM */
				*link_up_p = 1;
				np->link_config.active_speed = SPEED_10000;
				np->link_config.active_duplex = DUPLEX_FULL;
			}
		}
	}

	spin_unlock_irqrestore(&np->lock, flags);

	return 0;
}
2255
2256 static int niu_link_status(struct niu *np, int *link_up_p)
2257 {
2258         const struct niu_phy_ops *ops = np->phy_ops;
2259         int err;
2260
2261         err = 0;
2262         if (ops->link_status)
2263                 err = ops->link_status(np, link_up_p);
2264
2265         return err;
2266 }
2267
2268 static void niu_timer(unsigned long __opaque)
2269 {
2270         struct niu *np = (struct niu *) __opaque;
2271         unsigned long off;
2272         int err, link_up;
2273
2274         err = niu_link_status(np, &link_up);
2275         if (!err)
2276                 niu_link_status_common(np, link_up);
2277
2278         if (netif_carrier_ok(np->dev))
2279                 off = 5 * HZ;
2280         else
2281                 off = 1 * HZ;
2282         np->timer.expires = jiffies + off;
2283
2284         add_timer(&np->timer);
2285 }
2286
/* Per-transceiver-configuration operation tables.  Each bundles the
 * optional serdes/xcvr init hooks with a link-status poller; unset
 * hooks are simply skipped by the niu_serdes_init()/niu_xcvr_init()/
 * niu_link_status() wrappers.
 */
static const struct niu_phy_ops phy_ops_10g_serdes = {
	.serdes_init		= serdes_init_10g_serdes,
	.link_status		= link_status_10g_serdes,
};

static const struct niu_phy_ops phy_ops_10g_serdes_niu = {
	.serdes_init		= serdes_init_niu_10g_serdes,
	.link_status		= link_status_10g_serdes,
};

static const struct niu_phy_ops phy_ops_1g_serdes_niu = {
	.serdes_init		= serdes_init_niu_1g_serdes,
	.link_status		= link_status_1g_serdes,
};

static const struct niu_phy_ops phy_ops_1g_rgmii = {
	.xcvr_init		= xcvr_init_1g_rgmii,
	.link_status		= link_status_1g_rgmii,
};

static const struct niu_phy_ops phy_ops_10g_fiber_niu = {
	.serdes_init		= serdes_init_niu_10g_fiber,
	.xcvr_init		= xcvr_init_10g,
	.link_status		= link_status_10g,
};

static const struct niu_phy_ops phy_ops_10g_fiber = {
	.serdes_init		= serdes_init_10g,
	.xcvr_init		= xcvr_init_10g,
	.link_status		= link_status_10g,
};

/* Hotplug variants use the BCM8706 xcvr init and the presence-aware
 * link poller.
 */
static const struct niu_phy_ops phy_ops_10g_fiber_hotplug = {
	.serdes_init		= serdes_init_10g,
	.xcvr_init		= xcvr_init_10g_bcm8706,
	.link_status		= link_status_10g_hotplug,
};

static const struct niu_phy_ops phy_ops_niu_10g_hotplug = {
	.serdes_init		= serdes_init_niu_10g_fiber,
	.xcvr_init		= xcvr_init_10g_bcm8706,
	.link_status		= link_status_10g_hotplug,
};

static const struct niu_phy_ops phy_ops_10g_copper = {
	.serdes_init		= serdes_init_10g,
	.link_status		= link_status_10g, /* XXX */
};

static const struct niu_phy_ops phy_ops_1g_fiber = {
	.serdes_init		= serdes_init_1g,
	.xcvr_init		= xcvr_init_1g,
	.link_status		= link_status_1g,
};

static const struct niu_phy_ops phy_ops_1g_copper = {
	.xcvr_init		= xcvr_init_1g,
	.link_status		= link_status_1g,
};
2346
/* A PHY template pairs an ops table with the base MDIO address for
 * that transceiver type; a per-port offset is added on top by
 * niu_determine_phy_disposition().
 */
struct niu_phy_template {
	const struct niu_phy_ops	*ops;
	u32				phy_addr_base;
};

static const struct niu_phy_template phy_template_niu_10g_fiber = {
	.ops		= &phy_ops_10g_fiber_niu,
	.phy_addr_base	= 16,
};

static const struct niu_phy_template phy_template_niu_10g_serdes = {
	.ops		= &phy_ops_10g_serdes_niu,
	.phy_addr_base	= 0,
};

static const struct niu_phy_template phy_template_niu_1g_serdes = {
	.ops		= &phy_ops_1g_serdes_niu,
	.phy_addr_base	= 0,
};

static const struct niu_phy_template phy_template_10g_fiber = {
	.ops		= &phy_ops_10g_fiber,
	.phy_addr_base	= 8,
};

static const struct niu_phy_template phy_template_10g_fiber_hotplug = {
	.ops		= &phy_ops_10g_fiber_hotplug,
	.phy_addr_base	= 8,
};

static const struct niu_phy_template phy_template_niu_10g_hotplug = {
	.ops		= &phy_ops_niu_10g_hotplug,
	.phy_addr_base	= 8,
};

static const struct niu_phy_template phy_template_10g_copper = {
	.ops		= &phy_ops_10g_copper,
	.phy_addr_base	= 10,
};

static const struct niu_phy_template phy_template_1g_fiber = {
	.ops		= &phy_ops_1g_fiber,
	.phy_addr_base	= 0,
};

static const struct niu_phy_template phy_template_1g_copper = {
	.ops		= &phy_ops_1g_copper,
	.phy_addr_base	= 0,
};

static const struct niu_phy_template phy_template_1g_rgmii = {
	.ops		= &phy_ops_1g_rgmii,
	.phy_addr_base	= 0,
};

static const struct niu_phy_template phy_template_10g_serdes = {
	.ops		= &phy_ops_10g_serdes,
	.phy_addr_base	= 0,
};

/* Per-port PHY address offsets for ATCA boards, indexed by port
 * number.  Ports 2/3 map to 11/10; ports 0/1 get 0 — presumably
 * because they run over serdes (see the XCVR_SERDES case in
 * niu_determine_phy_disposition()) — TODO confirm.
 */
static int niu_atca_port_num[4] = {
	0, 0,  11, 10
};
2410
/* Initialize the 10G SERDES for ports 0/1.
 *
 * Programs the PLL, control and test-config registers (enabling pad
 * loopback when LOOPBACK_PHY is requested), configures all four
 * lanes' RX/TX control and glue registers, and then checks the ESR
 * interrupt signals for 10G link readiness.  If the 10G signals do
 * not come up, falls back to 1G serdes mode and downgrades np->flags
 * and np->mac_xcvr accordingly.  Returns 0 on success, -EINVAL on a
 * bad port, -ENODEV if neither 10G nor 1G comes up, or an error from
 * an ESR register access.
 */
static int serdes_init_10g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
	u64 ctrl_val, test_cfg_val, sig, mask, val;
	u64 reset_val;

	/* Only ports 0 and 1 have a 10G SERDES. */
	switch (np->port) {
	case 0:
		reset_val =  ENET_SERDES_RESET_0;
		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
		pll_cfg = ENET_SERDES_0_PLL_CFG;
		break;
	case 1:
		reset_val =  ENET_SERDES_RESET_1;
		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
		pll_cfg = ENET_SERDES_1_PLL_CFG;
		break;

	default:
		return -EINVAL;
	}
	/* Signal detect on all lanes, plus per-lane emphasis (0x5)
	 * and load adjust (0x1) settings.
	 */
	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
		    ENET_SERDES_CTRL_SDET_1 |
		    ENET_SERDES_CTRL_SDET_2 |
		    ENET_SERDES_CTRL_SDET_3 |
		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
	test_cfg_val = 0;

	/* PHY loopback: route each lane through pad loopback. */
	if (lp->loopback_mode == LOOPBACK_PHY) {
		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_0_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_1_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_2_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_3_SHIFT));
	}

	esr_reset(np);
	nw64(pll_cfg, ENET_SERDES_PLL_FBDIV2);
	nw64(ctrl_reg, ctrl_val);
	nw64(test_cfg_reg, test_cfg_val);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		u32 rxtx_ctrl, glue0;
		int err;

		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
		if (err)
			return err;
		err = esr_read_glue0(np, i, &glue0);
		if (err)
			return err;

		/* Enable stretch mode and set VMUXLO to 2. */
		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));

		/* RX LOS enable, max sample rate/threshold, 300-cycle
		 * blanking time.
		 */
		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
			   ESR_GLUE_CTRL0_THCNT |
			   ESR_GLUE_CTRL0_BLTIME);
		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
			  (BLTIME_300_CYCLES <<
			   ESR_GLUE_CTRL0_BLTIME_SHIFT));

		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
		if (err)
			return err;
		err = esr_write_glue0(np, i, glue0);
		if (err)
			return err;
	}


	/* Check whether the 10G link signals came up for this port. */
	sig = nr64(ESR_INT_SIGNALS);
	switch (np->port) {
	case 0:
		mask = ESR_INT_SIGNALS_P0_BITS;
		val = (ESR_INT_SRDY0_P0 |
		       ESR_INT_DET0_P0 |
		       ESR_INT_XSRDY_P0 |
		       ESR_INT_XDP_P0_CH3 |
		       ESR_INT_XDP_P0_CH2 |
		       ESR_INT_XDP_P0_CH1 |
		       ESR_INT_XDP_P0_CH0);
		break;

	case 1:
		mask = ESR_INT_SIGNALS_P1_BITS;
		val = (ESR_INT_SRDY0_P1 |
		       ESR_INT_DET0_P1 |
		       ESR_INT_XSRDY_P1 |
		       ESR_INT_XDP_P1_CH3 |
		       ESR_INT_XDP_P1_CH2 |
		       ESR_INT_XDP_P1_CH1 |
		       ESR_INT_XDP_P1_CH0);
		break;

	default:
		return -EINVAL;
	}

	/* No 10G link: try to fall back to 1G serdes mode. */
	if ((sig & mask) != val) {
		int err;
		err = serdes_init_1g_serdes(np);
		if (!err) {
			np->flags &= ~NIU_FLAGS_10G;
			np->mac_xcvr = MAC_XCVR_PCS;
		}  else {
			dev_err(np->device, PFX "Port %u 10G/1G SERDES Link Failed \n",
			 np->port);
			return -ENODEV;
		}
	}

	return 0;
}
2542
/* Choose the PHY ops template and MDIO address for this port.
 *
 * Selection is driven by the platform type (NIU vs. VF/ATCA style)
 * and the combination of the 10G / FIBER / XCVR_SERDES flags, with
 * hotplug-PHY ports getting dedicated templates and fixed per-port
 * address offsets.  On success, sets np->phy_ops and np->phy_addr
 * (template base + computed offset) and returns 0; returns -EINVAL
 * for an unsupported flag combination or port.
 */
static int niu_determine_phy_disposition(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	u8 plat_type = parent->plat_type;
	const struct niu_phy_template *tp;
	u32 phy_addr_off = 0;

	if (plat_type == PLAT_TYPE_NIU) {
		switch (np->flags &
			(NIU_FLAGS_10G |
			 NIU_FLAGS_FIBER |
			 NIU_FLAGS_XCVR_SERDES)) {
		case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
			/* 10G Serdes */
			tp = &phy_template_niu_10g_serdes;
			break;
		case NIU_FLAGS_XCVR_SERDES:
			/* 1G Serdes */
			tp = &phy_template_niu_1g_serdes;
			break;
		case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
			/* 10G Fiber */
		default:
			/* Hotplug ports use fixed offsets 8/12 for
			 * ports 0/1; otherwise offset by port number.
			 */
			if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
				tp = &phy_template_niu_10g_hotplug;
				if (np->port == 0)
					phy_addr_off = 8;
				if (np->port == 1)
					phy_addr_off = 12;
			} else {
				tp = &phy_template_niu_10g_fiber;
				phy_addr_off += np->port;
			}
			break;
		}
	} else {
		switch (np->flags &
			(NIU_FLAGS_10G |
			 NIU_FLAGS_FIBER |
			 NIU_FLAGS_XCVR_SERDES)) {
		case 0:
			/* 1G copper */
			tp = &phy_template_1g_copper;
			if (plat_type == PLAT_TYPE_VF_P0)
				phy_addr_off = 10;
			else if (plat_type == PLAT_TYPE_VF_P1)
				phy_addr_off = 26;

			/* Port-to-address mapping is reversed (3-port)
			 * on these boards.
			 */
			phy_addr_off += (np->port ^ 0x3);
			break;

		case NIU_FLAGS_10G:
			/* 10G copper */
			tp = &phy_template_10g_copper;
			break;

		case NIU_FLAGS_FIBER:
			/* 1G fiber */
			tp = &phy_template_1g_fiber;
			break;

		case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
			/* 10G fiber */
			tp = &phy_template_10g_fiber;
			if (plat_type == PLAT_TYPE_VF_P0 ||
			    plat_type == PLAT_TYPE_VF_P1)
				phy_addr_off = 8;
			phy_addr_off += np->port;
			/* Hotplug overrides the template and offset
			 * computed just above.
			 */
			if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
				tp = &phy_template_10g_fiber_hotplug;
				if (np->port == 0)
					phy_addr_off = 8;
				if (np->port == 1)
					phy_addr_off = 12;
			}
			break;

		case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
		case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
		case NIU_FLAGS_XCVR_SERDES:
			/* ATCA: ports 0/1 are 10G serdes, 2/3 are
			 * 1G RGMII; address comes from the ATCA map.
			 */
			switch(np->port) {
			case 0:
			case 1:
				tp = &phy_template_10g_serdes;
				break;
			case 2:
			case 3:
				tp = &phy_template_1g_rgmii;
				break;
			default:
				return -EINVAL;
				break;
			}
			phy_addr_off = niu_atca_port_num[np->port];
			break;

		default:
			return -EINVAL;
		}
	}

	np->phy_ops = tp->ops;
	np->phy_addr = tp->phy_addr_base + phy_addr_off;

	return 0;
}
2649
/* Bring the link up: on NIU platforms initialize the transceiver
 * first, then the SERDES, then the transceiver again, with 200ms
 * settle delays between the stages, finishing with a link-status
 * poll.  For hotplug-PHY configurations, serdes/xcvr errors are
 * tolerated (the PHY may simply be absent), so the function only
 * fails on an early NIU xcvr error or a non-hotplug serdes error.
 */
static int niu_init_link(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	int err, ignore;

	if (parent->plat_type == PLAT_TYPE_NIU) {
		err = niu_xcvr_init(np);
		if (err)
			return err;
		msleep(200);
	}
	err = niu_serdes_init(np);
	if (err && !(np->flags & NIU_FLAGS_HOTPLUG_PHY))
		return err;
	msleep(200);
	err = niu_xcvr_init(np);
	/* Poll once to seed carrier state; result is discarded. */
	if (!err || (np->flags & NIU_FLAGS_HOTPLUG_PHY))
		niu_link_status(np, &ignore);
	return 0;
}
2670
2671 static void niu_set_primary_mac(struct niu *np, unsigned char *addr)
2672 {
2673         u16 reg0 = addr[4] << 8 | addr[5];
2674         u16 reg1 = addr[2] << 8 | addr[3];
2675         u16 reg2 = addr[0] << 8 | addr[1];
2676
2677         if (np->flags & NIU_FLAGS_XMAC) {
2678                 nw64_mac(XMAC_ADDR0, reg0);
2679                 nw64_mac(XMAC_ADDR1, reg1);
2680                 nw64_mac(XMAC_ADDR2, reg2);
2681         } else {
2682                 nw64_mac(BMAC_ADDR0, reg0);
2683                 nw64_mac(BMAC_ADDR1, reg1);
2684                 nw64_mac(BMAC_ADDR2, reg2);
2685         }
2686 }
2687
2688 static int niu_num_alt_addr(struct niu *np)
2689 {
2690         if (np->flags & NIU_FLAGS_XMAC)
2691                 return XMAC_NUM_ALT_ADDR;
2692         else
2693                 return BMAC_NUM_ALT_ADDR;
2694 }
2695
2696 static int niu_set_alt_mac(struct niu *np, int index, unsigned char *addr)
2697 {
2698         u16 reg0 = addr[4] << 8 | addr[5];
2699         u16 reg1 = addr[2] << 8 | addr[3];
2700         u16 reg2 = addr[0] << 8 | addr[1];
2701
2702         if (index >= niu_num_alt_addr(np))
2703                 return -EINVAL;
2704
2705         if (np->flags & NIU_FLAGS_XMAC) {
2706                 nw64_mac(XMAC_ALT_ADDR0(index), reg0);
2707                 nw64_mac(XMAC_ALT_ADDR1(index), reg1);
2708                 nw64_mac(XMAC_ALT_ADDR2(index), reg2);
2709         } else {
2710                 nw64_mac(BMAC_ALT_ADDR0(index), reg0);
2711                 nw64_mac(BMAC_ALT_ADDR1(index), reg1);
2712                 nw64_mac(BMAC_ALT_ADDR2(index), reg2);
2713         }
2714
2715         return 0;
2716 }
2717
2718 static int niu_enable_alt_mac(struct niu *np, int index, int on)
2719 {
2720         unsigned long reg;
2721         u64 val, mask;
2722
2723         if (index >= niu_num_alt_addr(np))
2724                 return -EINVAL;
2725
2726         if (np->flags & NIU_FLAGS_XMAC) {
2727                 reg = XMAC_ADDR_CMPEN;
2728                 mask = 1 << index;
2729         } else {
2730                 reg = BMAC_ADDR_CMPEN;
2731                 mask = 1 << (index + 1);
2732         }
2733
2734         val = nr64_mac(reg);
2735         if (on)
2736                 val |= mask;
2737         else
2738                 val &= ~mask;
2739         nw64_mac(reg, val);
2740
2741         return 0;
2742 }
2743
2744 static void __set_rdc_table_num_hw(struct niu *np, unsigned long reg,
2745                                    int num, int mac_pref)
2746 {
2747         u64 val = nr64_mac(reg);
2748         val &= ~(HOST_INFO_MACRDCTBLN | HOST_INFO_MPR);
2749         val |= num;
2750         if (mac_pref)
2751                 val |= HOST_INFO_MPR;
2752         nw64_mac(reg, val);
2753 }
2754
2755 static int __set_rdc_table_num(struct niu *np,
2756                                int xmac_index, int bmac_index,
2757                                int rdc_table_num, int mac_pref)
2758 {
2759         unsigned long reg;
2760
2761         if (rdc_table_num & ~HOST_INFO_MACRDCTBLN)
2762                 return -EINVAL;
2763         if (np->flags & NIU_FLAGS_XMAC)
2764                 reg = XMAC_HOST_INFO(xmac_index);
2765         else
2766                 reg = BMAC_HOST_INFO(bmac_index);
2767         __set_rdc_table_num_hw(np, reg, rdc_table_num, mac_pref);
2768         return 0;
2769 }
2770
/* Bind the primary MAC address (XMAC slot 17, BMAC slot 0) to RDC
 * table @table_num.
 */
static int niu_set_primary_mac_rdc_table(struct niu *np, int table_num,
					 int mac_pref)
{
	return __set_rdc_table_num(np, 17, 0, table_num, mac_pref);
}
2776
/* Bind multicast traffic (XMAC slot 16, BMAC slot 8) to RDC table
 * @table_num.
 */
static int niu_set_multicast_mac_rdc_table(struct niu *np, int table_num,
					   int mac_pref)
{
	return __set_rdc_table_num(np, 16, 8, table_num, mac_pref);
}
2782
2783 static int niu_set_alt_mac_rdc_table(struct niu *np, int idx,
2784                                      int table_num, int mac_pref)
2785 {
2786         if (idx >= niu_num_alt_addr(np))
2787                 return -EINVAL;
2788         return __set_rdc_table_num(np, idx, idx + 1, table_num, mac_pref);
2789 }
2790
2791 static u64 vlan_entry_set_parity(u64 reg_val)
2792 {
2793         u64 port01_mask;
2794         u64 port23_mask;
2795
2796         port01_mask = 0x00ff;
2797         port23_mask = 0xff00;
2798
2799         if (hweight64(reg_val & port01_mask) & 1)
2800                 reg_val |= ENET_VLAN_TBL_PARITY0;
2801         else
2802                 reg_val &= ~ENET_VLAN_TBL_PARITY0;
2803
2804         if (hweight64(reg_val & port23_mask) & 1)
2805                 reg_val |= ENET_VLAN_TBL_PARITY1;
2806         else
2807                 reg_val &= ~ENET_VLAN_TBL_PARITY1;
2808
2809         return reg_val;
2810 }
2811
2812 static void vlan_tbl_write(struct niu *np, unsigned long index,
2813                            int port, int vpr, int rdc_table)
2814 {
2815         u64 reg_val = nr64(ENET_VLAN_TBL(index));
2816
2817         reg_val &= ~((ENET_VLAN_TBL_VPR |
2818                       ENET_VLAN_TBL_VLANRDCTBLN) <<
2819                      ENET_VLAN_TBL_SHIFT(port));
2820         if (vpr)
2821                 reg_val |= (ENET_VLAN_TBL_VPR <<
2822                             ENET_VLAN_TBL_SHIFT(port));
2823         reg_val |= (rdc_table << ENET_VLAN_TBL_SHIFT(port));
2824
2825         reg_val = vlan_entry_set_parity(reg_val);
2826
2827         nw64(ENET_VLAN_TBL(index), reg_val);
2828 }
2829
2830 static void vlan_tbl_clear(struct niu *np)
2831 {
2832         int i;
2833
2834         for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++)
2835                 nw64(ENET_VLAN_TBL(i), 0);
2836 }
2837
2838 static int tcam_wait_bit(struct niu *np, u64 bit)
2839 {
2840         int limit = 1000;
2841
2842         while (--limit > 0) {
2843                 if (nr64(TCAM_CTL) & bit)
2844                         break;
2845                 udelay(1);
2846         }
2847         if (limit <= 0)
2848                 return -ENODEV;
2849
2850         return 0;
2851 }
2852
/* Invalidate TCAM entry @index: stage an all-zero key with mask byte
 * 0xff, then commit it with a TCAM write and wait for completion.
 * The staging writes must precede the TCAM_CTL commit.
 */
static int tcam_flush(struct niu *np, int index)
{
	nw64(TCAM_KEY_0, 0x00);
	nw64(TCAM_KEY_MASK_0, 0xff);
	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));

	return tcam_wait_bit(np, TCAM_CTL_STAT);
}
2861
#if 0
/* Read back TCAM entry @index into key[0..3] and mask[0..3].
 * Compiled out: currently unused, kept for debugging.
 */
static int tcam_read(struct niu *np, int index,
		     u64 *key, u64 *mask)
{
	int err;

	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_READ | index));
	err = tcam_wait_bit(np, TCAM_CTL_STAT);
	if (!err) {
		key[0] = nr64(TCAM_KEY_0);
		key[1] = nr64(TCAM_KEY_1);
		key[2] = nr64(TCAM_KEY_2);
		key[3] = nr64(TCAM_KEY_3);
		mask[0] = nr64(TCAM_KEY_MASK_0);
		mask[1] = nr64(TCAM_KEY_MASK_1);
		mask[2] = nr64(TCAM_KEY_MASK_2);
		mask[3] = nr64(TCAM_KEY_MASK_3);
	}
	return err;
}
#endif
2883
/* Program TCAM entry @index with a 4-word key and 4-word mask.
 * All eight staging registers must be written before the TCAM_CTL
 * commit; returns the tcam_wait_bit() completion status.
 */
static int tcam_write(struct niu *np, int index,
		      u64 *key, u64 *mask)
{
	nw64(TCAM_KEY_0, key[0]);
	nw64(TCAM_KEY_1, key[1]);
	nw64(TCAM_KEY_2, key[2]);
	nw64(TCAM_KEY_3, key[3]);
	nw64(TCAM_KEY_MASK_0, mask[0]);
	nw64(TCAM_KEY_MASK_1, mask[1]);
	nw64(TCAM_KEY_MASK_2, mask[2]);
	nw64(TCAM_KEY_MASK_3, mask[3]);
	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));

	return tcam_wait_bit(np, TCAM_CTL_STAT);
}
2899
#if 0
/* Read the associated-data RAM word for TCAM slot @index; the result
 * comes back through the TCAM_KEY_1 register.  Compiled out: unused.
 */
static int tcam_assoc_read(struct niu *np, int index, u64 *data)
{
	int err;

	nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_READ | index));
	err = tcam_wait_bit(np, TCAM_CTL_STAT);
	if (!err)
		*data = nr64(TCAM_KEY_1);

	return err;
}
#endif
2913
/* Write @assoc_data into the associated-data RAM word for TCAM slot
 * @index (staged through TCAM_KEY_1, committed via TCAM_CTL).
 */
static int tcam_assoc_write(struct niu *np, int index, u64 assoc_data)
{
	nw64(TCAM_KEY_1, assoc_data);
	nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_WRITE | index));

	return tcam_wait_bit(np, TCAM_CTL_STAT);
}
2921
2922 static void tcam_enable(struct niu *np, int on)
2923 {
2924         u64 val = nr64(FFLP_CFG_1);
2925
2926         if (on)
2927                 val &= ~FFLP_CFG_1_TCAM_DIS;
2928         else
2929                 val |= FFLP_CFG_1_TCAM_DIS;
2930         nw64(FFLP_CFG_1, val);
2931 }
2932
/* Program TCAM access latency and access ratio.  FFLPINITDONE is
 * cleared while the fields are updated, then re-asserted with a
 * second read-modify-write; the two-step sequence is intentional.
 */
static void tcam_set_lat_and_ratio(struct niu *np, u64 latency, u64 ratio)
{
	u64 val = nr64(FFLP_CFG_1);

	val &= ~(FFLP_CFG_1_FFLPINITDONE |
		 FFLP_CFG_1_CAMLAT |
		 FFLP_CFG_1_CAMRATIO);
	val |= (latency << FFLP_CFG_1_CAMLAT_SHIFT);
	val |= (ratio << FFLP_CFG_1_CAMRATIO_SHIFT);
	nw64(FFLP_CFG_1, val);

	val = nr64(FFLP_CFG_1);
	val |= FFLP_CFG_1_FFLPINITDONE;
	nw64(FFLP_CFG_1, val);
}
2948
2949 static int tcam_user_eth_class_enable(struct niu *np, unsigned long class,
2950                                       int on)
2951 {
2952         unsigned long reg;
2953         u64 val;
2954
2955         if (class < CLASS_CODE_ETHERTYPE1 ||
2956             class > CLASS_CODE_ETHERTYPE2)
2957                 return -EINVAL;
2958
2959         reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1);
2960         val = nr64(reg);
2961         if (on)
2962                 val |= L2_CLS_VLD;
2963         else
2964                 val &= ~L2_CLS_VLD;
2965         nw64(reg, val);
2966
2967         return 0;
2968 }
2969
#if 0
/* Program the 16-bit ether_type matched by a user-programmable L2
 * class.  Compiled out: currently unused.
 */
static int tcam_user_eth_class_set(struct niu *np, unsigned long class,
				   u64 ether_type)
{
	unsigned long reg;
	u64 val;

	if (class < CLASS_CODE_ETHERTYPE1 ||
	    class > CLASS_CODE_ETHERTYPE2 ||
	    (ether_type & ~(u64)0xffff) != 0)
		return -EINVAL;

	reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1);
	val = nr64(reg);
	val &= ~L2_CLS_ETYPE;
	val |= (ether_type << L2_CLS_ETYPE_SHIFT);
	nw64(reg, val);

	return 0;
}
#endif
2991
2992 static int tcam_user_ip_class_enable(struct niu *np, unsigned long class,
2993                                      int on)
2994 {
2995         unsigned long reg;
2996         u64 val;
2997
2998         if (class < CLASS_CODE_USER_PROG1 ||
2999             class > CLASS_CODE_USER_PROG4)
3000                 return -EINVAL;
3001
3002         reg = L3_CLS(class - CLASS_CODE_USER_PROG1);
3003         val = nr64(reg);
3004         if (on)
3005                 val |= L3_CLS_VALID;
3006         else
3007                 val &= ~L3_CLS_VALID;
3008         nw64(reg, val);
3009
3010         return 0;
3011 }
3012
3013 static int tcam_user_ip_class_set(struct niu *np, unsigned long class,
3014                                   int ipv6, u64 protocol_id,
3015                                   u64 tos_mask, u64 tos_val)
3016 {
3017         unsigned long reg;
3018         u64 val;
3019
3020         if (class < CLASS_CODE_USER_PROG1 ||
3021             class > CLASS_CODE_USER_PROG4 ||
3022             (protocol_id & ~(u64)0xff) != 0 ||
3023             (tos_mask & ~(u64)0xff) != 0 ||
3024             (tos_val & ~(u64)0xff) != 0)
3025                 return -EINVAL;
3026
3027         reg = L3_CLS(class - CLASS_CODE_USER_PROG1);
3028         val = nr64(reg);
3029         val &= ~(L3_CLS_IPVER | L3_CLS_PID |
3030                  L3_CLS_TOSMASK | L3_CLS_TOS);
3031         if (ipv6)
3032                 val |= L3_CLS_IPVER;
3033         val |= (protocol_id << L3_CLS_PID_SHIFT);
3034         val |= (tos_mask << L3_CLS_TOSMASK_SHIFT);
3035         val |= (tos_val << L3_CLS_TOS_SHIFT);
3036         nw64(reg, val);
3037
3038         return 0;
3039 }
3040
3041 static int tcam_early_init(struct niu *np)
3042 {
3043         unsigned long i;
3044         int err;
3045
3046         tcam_enable(np, 0);
3047         tcam_set_lat_and_ratio(np,
3048                                DEFAULT_TCAM_LATENCY,
3049                                DEFAULT_TCAM_ACCESS_RATIO);
3050         for (i = CLASS_CODE_ETHERTYPE1; i <= CLASS_CODE_ETHERTYPE2; i++) {
3051                 err = tcam_user_eth_class_enable(np, i, 0);
3052                 if (err)
3053                         return err;
3054         }
3055         for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_USER_PROG4; i++) {
3056                 err = tcam_user_ip_class_enable(np, i, 0);
3057                 if (err)
3058                         return err;
3059         }
3060
3061         return 0;
3062 }
3063
3064 static int tcam_flush_all(struct niu *np)
3065 {
3066         unsigned long i;
3067
3068         for (i = 0; i < np->parent->tcam_num_entries; i++) {
3069                 int err = tcam_flush(np, i);
3070                 if (err)
3071                         return err;
3072         }
3073         return 0;
3074 }
3075
3076 static u64 hash_addr_regval(unsigned long index, unsigned long num_entries)
3077 {
3078         return ((u64)index | (num_entries == 1 ?
3079                               HASH_TBL_ADDR_AUTOINC : 0));
3080 }
3081
#if 0
/* Read @num_entries 64-bit words from FCRAM hash @partition starting
 * at @index.  Compiled out: currently unused.
 *
 * NOTE(review): the bounds check here uses "index + num_entries" while
 * hash_write() uses "index + (num_entries * 8)" -- one of the two is
 * presumably wrong about FCRAM_SIZE's units; confirm before enabling.
 */
static int hash_read(struct niu *np, unsigned long partition,
		     unsigned long index, unsigned long num_entries,
		     u64 *data)
{
	u64 val = hash_addr_regval(index, num_entries);
	unsigned long i;

	if (partition >= FCRAM_NUM_PARTITIONS ||
	    index + num_entries > FCRAM_SIZE)
		return -EINVAL;

	nw64(HASH_TBL_ADDR(partition), val);
	for (i = 0; i < num_entries; i++)
		data[i] = nr64(HASH_TBL_DATA(partition));

	return 0;
}
#endif
3101
3102 static int hash_write(struct niu *np, unsigned long partition,
3103                       unsigned long index, unsigned long num_entries,
3104                       u64 *data)
3105 {
3106         u64 val = hash_addr_regval(index, num_entries);
3107         unsigned long i;
3108
3109         if (partition >= FCRAM_NUM_PARTITIONS ||
3110             index + (num_entries * 8) > FCRAM_SIZE)
3111                 return -EINVAL;
3112
3113         nw64(HASH_TBL_ADDR(partition), val);
3114         for (i = 0; i < num_entries; i++)
3115                 nw64(HASH_TBL_DATA(partition), data[i]);
3116
3117         return 0;
3118 }
3119
/* Hard-reset the FFLP block: pulse the PIO/FIO reset bit for 10us,
 * release it, then restore normal FCRAM output drive and mark init
 * done.  The write/delay/write ordering is required by the hardware.
 */
static void fflp_reset(struct niu *np)
{
	u64 val;

	nw64(FFLP_CFG_1, FFLP_CFG_1_PIO_FIO_RST);
	udelay(10);
	nw64(FFLP_CFG_1, 0);

	val = FFLP_CFG_1_FCRAMOUTDR_NORMAL | FFLP_CFG_1_FFLPINITDONE;
	nw64(FFLP_CFG_1, val);
}
3131
/* Program FCRAM access ratio and refresh timers.  FFLPINITDONE is
 * dropped while the ratio is updated and re-asserted afterwards in a
 * separate read-modify-write; keep the sequence order intact.
 */
static void fflp_set_timings(struct niu *np)
{
	u64 val = nr64(FFLP_CFG_1);

	val &= ~FFLP_CFG_1_FFLPINITDONE;
	val |= (DEFAULT_FCRAMRATIO << FFLP_CFG_1_FCRAMRATIO_SHIFT);
	nw64(FFLP_CFG_1, val);

	val = nr64(FFLP_CFG_1);
	val |= FFLP_CFG_1_FFLPINITDONE;
	nw64(FFLP_CFG_1, val);

	/* Min/max refresh intervals for the FCRAM. */
	val = nr64(FCRAM_REF_TMR);
	val &= ~(FCRAM_REF_TMR_MAX | FCRAM_REF_TMR_MIN);
	val |= (DEFAULT_FCRAM_REFRESH_MAX << FCRAM_REF_TMR_MAX_SHIFT);
	val |= (DEFAULT_FCRAM_REFRESH_MIN << FCRAM_REF_TMR_MIN_SHIFT);
	nw64(FCRAM_REF_TMR, val);
}
3150
3151 static int fflp_set_partition(struct niu *np, u64 partition,
3152                               u64 mask, u64 base, int enable)
3153 {
3154         unsigned long reg;
3155         u64 val;
3156
3157         if (partition >= FCRAM_NUM_PARTITIONS ||
3158             (mask & ~(u64)0x1f) != 0 ||
3159             (base & ~(u64)0x1f) != 0)
3160                 return -EINVAL;
3161
3162         reg = FLW_PRT_SEL(partition);
3163
3164         val = nr64(reg);
3165         val &= ~(FLW_PRT_SEL_EXT | FLW_PRT_SEL_MASK | FLW_PRT_SEL_BASE);
3166         val |= (mask << FLW_PRT_SEL_MASK_SHIFT);
3167         val |= (base << FLW_PRT_SEL_BASE_SHIFT);
3168         if (enable)
3169                 val |= FLW_PRT_SEL_EXT;
3170         nw64(reg, val);
3171
3172         return 0;
3173 }
3174
3175 static int fflp_disable_all_partitions(struct niu *np)
3176 {
3177         unsigned long i;
3178
3179         for (i = 0; i < FCRAM_NUM_PARTITIONS; i++) {
3180                 int err = fflp_set_partition(np, 0, 0, 0, 0);
3181                 if (err)
3182                         return err;
3183         }
3184         return 0;
3185 }
3186
3187 static void fflp_llcsnap_enable(struct niu *np, int on)
3188 {
3189         u64 val = nr64(FFLP_CFG_1);
3190
3191         if (on)
3192                 val |= FFLP_CFG_1_LLCSNAP;
3193         else
3194                 val &= ~FFLP_CFG_1_LLCSNAP;
3195         nw64(FFLP_CFG_1, val);
3196 }
3197
3198 static void fflp_errors_enable(struct niu *np, int on)
3199 {
3200         u64 val = nr64(FFLP_CFG_1);
3201
3202         if (on)
3203                 val &= ~FFLP_CFG_1_ERRORDIS;
3204         else
3205                 val |= FFLP_CFG_1_ERRORDIS;
3206         nw64(FFLP_CFG_1, val);
3207 }
3208
3209 static int fflp_hash_clear(struct niu *np)
3210 {
3211         struct fcram_hash_ipv4 ent;
3212         unsigned long i;
3213
3214         /* IPV4 hash entry with valid bit clear, rest is don't care.  */
3215         memset(&ent, 0, sizeof(ent));
3216         ent.header = HASH_HEADER_EXT;
3217
3218         for (i = 0; i < FCRAM_SIZE; i += sizeof(ent)) {
3219                 int err = hash_write(np, 0, i, 1, (u64 *) &ent);
3220                 if (err)
3221                         return err;
3222         }
3223         return 0;
3224 }
3225
/* One-time classifier (FFLP/TCAM) hardware init, performed by the
 * first port to get here.  The PARENT_FLGS_CLS_HWINIT flag, tested
 * and set under the parent lock, guarantees the sequence runs once
 * per parent device.  Returns 0 or a negative errno.
 */
static int fflp_early_init(struct niu *np)
{
	struct niu_parent *parent;
	unsigned long flags;
	int err;

	niu_lock_parent(np, flags);

	parent = np->parent;
	err = 0;
	if (!(parent->flags & PARENT_FLGS_CLS_HWINIT)) {
		niudbg(PROBE, "fflp_early_init: Initting hw on port %u\n",
		       np->port);
		/* FCRAM-based flow tables exist only off the NIU platform. */
		if (np->parent->plat_type != PLAT_TYPE_NIU) {
			fflp_reset(np);
			fflp_set_timings(np);
			err = fflp_disable_all_partitions(np);
			if (err) {
				niudbg(PROBE, "fflp_disable_all_partitions "
				       "failed, err=%d\n", err);
				goto out;
			}
		}

		err = tcam_early_init(np);
		if (err) {
			niudbg(PROBE, "tcam_early_init failed, err=%d\n",
			       err);
			goto out;
		}
		fflp_llcsnap_enable(np, 1);
		fflp_errors_enable(np, 0);
		/* Zero both hash polynomials. */
		nw64(H1POLY, 0);
		nw64(H2POLY, 0);

		err = tcam_flush_all(np);
		if (err) {
			niudbg(PROBE, "tcam_flush_all failed, err=%d\n",
			       err);
			goto out;
		}
		if (np->parent->plat_type != PLAT_TYPE_NIU) {
			err = fflp_hash_clear(np);
			if (err) {
				niudbg(PROBE, "fflp_hash_clear failed, "
				       "err=%d\n", err);
				goto out;
			}
		}

		vlan_tbl_clear(np);

		niudbg(PROBE, "fflp_early_init: Success\n");
		/* Mark done so sibling ports skip all of the above. */
		parent->flags |= PARENT_FLGS_CLS_HWINIT;
	}
out:
	niu_unlock_parent(np, flags);
	return err;
}
3285
3286 static int niu_set_flow_key(struct niu *np, unsigned long class_code, u64 key)
3287 {
3288         if (class_code < CLASS_CODE_USER_PROG1 ||
3289             class_code > CLASS_CODE_SCTP_IPV6)
3290                 return -EINVAL;
3291
3292         nw64(FLOW_KEY(class_code - CLASS_CODE_USER_PROG1), key);
3293         return 0;
3294 }
3295
3296 static int niu_set_tcam_key(struct niu *np, unsigned long class_code, u64 key)
3297 {
3298         if (class_code < CLASS_CODE_USER_PROG1 ||
3299             class_code > CLASS_CODE_SCTP_IPV6)
3300                 return -EINVAL;
3301
3302         nw64(TCAM_KEY(class_code - CLASS_CODE_USER_PROG1), key);
3303         return 0;
3304 }
3305
3306 /* Entries for the ports are interleaved in the TCAM */
3307 static u16 tcam_get_index(struct niu *np, u16 idx)
3308 {
3309         /* One entry reserved for IP fragment rule */
3310         if (idx >= (np->clas.tcam_sz - 1))
3311                 idx = 0;
3312         return (np->clas.tcam_top + ((idx+1) * np->parent->num_ports));
3313 }
3314
3315 static u16 tcam_get_size(struct niu *np)
3316 {
3317         /* One entry reserved for IP fragment rule */
3318         return np->clas.tcam_sz - 1;
3319 }
3320
3321 static u16 tcam_get_valid_entry_cnt(struct niu *np)
3322 {
3323         /* One entry reserved for IP fragment rule */
3324         return np->clas.tcam_valid_entries - 1;
3325 }
3326
3327 static void niu_rx_skb_append(struct sk_buff *skb, struct page *page,
3328                               u32 offset, u32 size)
3329 {
3330         int i = skb_shinfo(skb)->nr_frags;
3331         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3332
3333         frag->page = page;
3334         frag->page_offset = offset;
3335         frag->size = size;
3336
3337         skb->len += size;
3338         skb->data_len += size;
3339         skb->truesize += size;
3340
3341         skb_shinfo(skb)->nr_frags = i + 1;
3342 }
3343
3344 static unsigned int niu_hash_rxaddr(struct rx_ring_info *rp, u64 a)
3345 {
3346         a >>= PAGE_SHIFT;
3347         a ^= (a >> ilog2(MAX_RBR_RING_SIZE));
3348
3349         return (a & (MAX_RBR_RING_SIZE - 1));
3350 }
3351
3352 static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr,
3353                                     struct page ***link)
3354 {
3355         unsigned int h = niu_hash_rxaddr(rp, addr);
3356         struct page *p, **pp;
3357
3358         addr &= PAGE_MASK;
3359         pp = &rp->rxhash[h];
3360         for (; (p = *pp) != NULL; pp = (struct page **) &p->mapping) {
3361                 if (p->index == addr) {
3362                         *link = pp;
3363                         break;
3364                 }
3365         }
3366
3367         return p;
3368 }
3369
3370 static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base)
3371 {
3372         unsigned int h = niu_hash_rxaddr(rp, base);
3373
3374         page->index = base;
3375         page->mapping = (struct address_space *) rp->rxhash[h];
3376         rp->rxhash[h] = page;
3377 }
3378
/* Allocate one page, DMA-map it, hash it into the ring's rx page
 * table, and fill rbr_blocks_per_page RBR descriptors (starting at
 * @start_index) with the sub-block addresses within the page.
 * Returns 0, or -ENOMEM if the page allocation fails.
 */
static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp,
			    gfp_t mask, int start_index)
{
	struct page *page;
	u64 addr;
	int i;

	page = alloc_page(mask);
	if (!page)
		return -ENOMEM;

	/* NOTE(review): the ->map_page return is not checked for a DMA
	 * mapping error -- confirm whether np->ops->map_page can fail.
	 */
	addr = np->ops->map_page(np->device, page, 0,
				 PAGE_SIZE, DMA_FROM_DEVICE);

	niu_hash_page(rp, page, addr);
	/* Each RBR block in the page holds its own page reference;
	 * alloc_page() supplied one, add the remainder up front.
	 */
	if (rp->rbr_blocks_per_page > 1)
		atomic_add(rp->rbr_blocks_per_page - 1,
			   &compound_head(page)->_count);

	for (i = 0; i < rp->rbr_blocks_per_page; i++) {
		__le32 *rbr = &rp->rbr[start_index + i];

		/* Descriptors store the block address right-shifted. */
		*rbr = cpu_to_le32(addr >> RBR_DESCR_ADDR_SHIFT);
		addr += rp->rbr_block_size;
	}

	return 0;
}
3407
/* Account one consumed RBR buffer; once a full page's worth is
 * pending, allocate a replacement page at the current ring index and
 * kick the hardware when rbr_kick_thresh buffers have accumulated.
 */
static void niu_rbr_refill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
{
	int index = rp->rbr_index;

	rp->rbr_pending++;
	if ((rp->rbr_pending % rp->rbr_blocks_per_page) == 0) {
		int err = niu_rbr_add_page(np, rp, mask, index);

		if (unlikely(err)) {
			/* Allocation failed; back out the count so a
			 * later refill retries this page.
			 */
			rp->rbr_pending--;
			return;
		}

		rp->rbr_index += rp->rbr_blocks_per_page;
		BUG_ON(rp->rbr_index > rp->rbr_table_size);
		if (rp->rbr_index == rp->rbr_table_size)
			rp->rbr_index = 0;

		if (rp->rbr_pending >= rp->rbr_kick_thresh) {
			nw64(RBR_KICK(rp->rx_channel), rp->rbr_pending);
			rp->rbr_pending = 0;
		}
	}
}
3432
/* Drop the packet at the current RCR index: walk all of its RCR
 * entries (a multi-buffer packet sets RCR_ENTRY_MULTI on all but the
 * last), releasing each backing page whose final block this was.
 * Returns the number of RCR entries consumed.
 */
static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp)
{
	unsigned int index = rp->rcr_index;
	int num_rcr = 0;

	rp->rx_dropped++;
	while (1) {
		struct page *page, **link;
		u64 addr, val;
		u32 rcr_size;

		num_rcr++;

		val = le64_to_cpup(&rp->rcr[index]);
		addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
			RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
		page = niu_find_rxpage(rp, addr, &link);

		rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
					 RCR_ENTRY_PKTBUFSZ_SHIFT];
		/* Last block of the page consumed: unlink it from the
		 * hash, unmap and free it, and note that a refill is due.
		 */
		if ((page->index + PAGE_SIZE) - rcr_size == addr) {
			*link = (struct page *) page->mapping;
			np->ops->unmap_page(np->device, page->index,
					    PAGE_SIZE, DMA_FROM_DEVICE);
			page->index = 0;
			page->mapping = NULL;
			__free_page(page);
			rp->rbr_refill_pending++;
		}

		index = NEXT_RCR(rp, index);
		if (!(val & RCR_ENTRY_MULTI))
			break;

	}
	rp->rcr_index = index;

	return num_rcr;
}
3472
/* Receive one packet: gather its RCR entries (multi-buffer packets
 * set RCR_ENTRY_MULTI on all but the last) into page fragments of a
 * freshly allocated skb and hand it to NAPI GRO.  If the skb cannot
 * be allocated, the packet's RCR entries are consumed and dropped
 * via niu_rx_pkt_ignore().  Returns the number of RCR entries used.
 */
static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
			      struct rx_ring_info *rp)
{
	unsigned int index = rp->rcr_index;
	struct sk_buff *skb;
	int len, num_rcr;

	skb = netdev_alloc_skb(np->dev, RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb))
		return niu_rx_pkt_ignore(np, rp);

	num_rcr = 0;
	while (1) {
		struct page *page, **link;
		u32 rcr_size, append_size;
		u64 addr, val, off;

		num_rcr++;

		val = le64_to_cpup(&rp->rcr[index]);

		/* L2 length includes the FCS, which we don't deliver. */
		len = (val & RCR_ENTRY_L2_LEN) >>
			RCR_ENTRY_L2_LEN_SHIFT;
		len -= ETH_FCS_LEN;

		addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
			RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
		page = niu_find_rxpage(rp, addr, &link);

		rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
					 RCR_ENTRY_PKTBUFSZ_SHIFT];

		off = addr & ~PAGE_MASK;
		append_size = rcr_size;
		if (num_rcr == 1) {
			int ptype;

			/* Skip the 2-byte alignment pad in the first
			 * buffer, and take the hardware checksum verdict
			 * for TCP/UDP packets that hit a port and had no
			 * errors.
			 */
			off += 2;
			append_size -= 2;

			ptype = (val >> RCR_ENTRY_PKT_TYPE_SHIFT);
			if ((ptype == RCR_PKT_TYPE_TCP ||
			     ptype == RCR_PKT_TYPE_UDP) &&
			    !(val & (RCR_ENTRY_NOPORT |
				     RCR_ENTRY_ERROR)))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			else
				skb->ip_summed = CHECKSUM_NONE;
		}
		/* Final buffer: append only the bytes still missing. */
		if (!(val & RCR_ENTRY_MULTI))
			append_size = len - skb->len;

		niu_rx_skb_append(skb, page, off, append_size);
		/* Last block of the page consumed: unlink and unmap it
		 * (the skb fragment now owns the page reference);
		 * otherwise take an extra reference for the fragment.
		 */
		if ((page->index + rp->rbr_block_size) - rcr_size == addr) {
			*link = (struct page *) page->mapping;
			np->ops->unmap_page(np->device, page->index,
					    PAGE_SIZE, DMA_FROM_DEVICE);
			page->index = 0;
			page->mapping = NULL;
			rp->rbr_refill_pending++;
		} else
			get_page(page);

		index = NEXT_RCR(rp, index);
		if (!(val & RCR_ENTRY_MULTI))
			break;

	}
	rp->rcr_index = index;

	skb_reserve(skb, NET_IP_ALIGN);
	/* Pull the link-layer header into the linear area. */
	__pskb_pull_tail(skb, min(len, VLAN_ETH_HLEN));

	rp->rx_packets++;
	rp->rx_bytes += skb->len;

	skb->protocol = eth_type_trans(skb, np->dev);
	skb_record_rx_queue(skb, rp->rx_channel);
	napi_gro_receive(napi, skb);

	return num_rcr;
}
3555
3556 static int niu_rbr_fill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
3557 {
3558         int blocks_per_page = rp->rbr_blocks_per_page;
3559         int err, index = rp->rbr_index;
3560
3561         err = 0;
3562         while (index < (rp->rbr_table_size - blocks_per_page)) {
3563                 err = niu_rbr_add_page(np, rp, mask, index);
3564                 if (err)
3565                         break;
3566
3567                 index += blocks_per_page;
3568         }
3569
3570         rp->rbr_index = index;
3571         return err;
3572 }
3573
3574 static void niu_rbr_free(struct niu *np, struct rx_ring_info *rp)
3575 {
3576         int i;
3577
3578         for (i = 0; i < MAX_RBR_RING_SIZE; i++) {
3579                 struct page *page;
3580
3581                 page = rp->rxhash[i];
3582                 while (page) {
3583                         struct page *next = (struct page *) page->mapping;
3584                         u64 base = page->index;
3585
3586                         np->ops->unmap_page(np->device, base, PAGE_SIZE,
3587                                             DMA_FROM_DEVICE);
3588                         page->index = 0;
3589                         page->mapping = NULL;
3590
3591                         __free_page(page);
3592
3593                         page = next;
3594                 }
3595         }
3596
3597         for (i = 0; i < rp->rbr_table_size; i++)
3598                 rp->rbr[i] = cpu_to_le32(0);
3599         rp->rbr_index = 0;
3600 }
3601
/* Unwind all state for one transmitted packet whose first descriptor
 * is at 'idx': update the ring's packet/byte counters, DMA-unmap the
 * linear head and every page fragment, and free the skb.
 *
 * Returns the descriptor index just past the packet, i.e. the new
 * consumer position.
 */
static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx)
{
	struct tx_buff_info *tb = &rp->tx_buffs[idx];
	struct sk_buff *skb = tb->skb;
	struct tx_pkt_hdr *tp;
	u64 tx_flags;
	int i, len;

	/* The driver-built header at the front of the skb records the
	 * true length and pad used for byte accounting.
	 */
	tp = (struct tx_pkt_hdr *) skb->data;
	tx_flags = le64_to_cpup(&tp->flags);

	rp->tx_packets++;
	rp->tx_bytes += (((tx_flags & TXHDR_LEN) >> TXHDR_LEN_SHIFT) -
			 ((tx_flags & TXHDR_PAD) / 2));

	len = skb_headlen(skb);
	np->ops->unmap_single(np->device, tb->mapping,
			      len, DMA_TO_DEVICE);

	if (le64_to_cpu(rp->descr[idx]) & TX_DESC_MARK)
		rp->mark_pending--;

	tb->skb = NULL;
	/* The linear head may span several descriptors, one per
	 * MAX_TX_DESC_LEN chunk; advance past all of them.
	 */
	do {
		idx = NEXT_TX(rp, idx);
		len -= MAX_TX_DESC_LEN;
	} while (len > 0);

	/* One descriptor per page fragment follows the head. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		tb = &rp->tx_buffs[idx];
		BUG_ON(tb->skb != NULL);
		np->ops->unmap_page(np->device, tb->mapping,
				    skb_shinfo(skb)->frags[i].size,
				    DMA_TO_DEVICE);
		idx = NEXT_TX(rp, idx);
	}

	dev_kfree_skb(skb);

	return idx;
}
3643
/* Wake a stopped queue once at least a quarter of the ring is free. */
#define NIU_TX_WAKEUP_THRESH(rp)		((rp)->pending / 4)

/* Reclaim completed TX descriptors for one ring.
 *
 * The hardware packet counter in TX_CS is free-running; the number of
 * newly completed packets is the wrapped difference against the value
 * remembered from the previous pass (last_pkt_cnt).  Each completed
 * packet is released via release_tx_packet(), then the queue is woken
 * if it was stopped and enough ring space has been recovered.
 */
static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
{
	struct netdev_queue *txq;
	u16 pkt_cnt, tmp;
	int cons, index;
	u64 cs;

	index = (rp - np->tx_rings);
	txq = netdev_get_tx_queue(np->dev, index);

	cs = rp->tx_cs;
	/* Nothing to reclaim unless a marker/mailbox-marker event fired. */
	if (unlikely(!(cs & (TX_CS_MK | TX_CS_MMK))))
		goto out;

	/* Completed-packet delta, modulo the counter field width. */
	tmp = pkt_cnt = (cs & TX_CS_PKT_CNT) >> TX_CS_PKT_CNT_SHIFT;
	pkt_cnt = (pkt_cnt - rp->last_pkt_cnt) &
		(TX_CS_PKT_CNT >> TX_CS_PKT_CNT_SHIFT);

	rp->last_pkt_cnt = tmp;

	cons = rp->cons;

	niudbg(TX_DONE, "%s: niu_tx_work() pkt_cnt[%u] cons[%d]\n",
	       np->dev->name, pkt_cnt, cons);

	while (pkt_cnt--)
		cons = release_tx_packet(np, rp, cons);

	rp->cons = cons;
	/* Publish the new consumer index before testing the queue
	 * state below (pairs with the barrier on the xmit stop side).
	 */
	smp_mb();

out:
	/* Double-checked wakeup under the tx lock, to avoid racing
	 * with the transmit path stopping the queue concurrently.
	 */
	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
3687
/* Fold the hardware RX discard counters for one channel into the
 * software statistics.  'limit' bounds how large a counter must be
 * before it is read-and-cleared, to reduce the window in which
 * counts can be lost (see below).
 */
static inline void niu_sync_rx_discard_stats(struct niu *np,
					     struct rx_ring_info *rp,
					     const int limit)
{
	/* This elaborate scheme is needed for reading the RX discard
	 * counters, as they are only 16-bit and can overflow quickly,
	 * and because the overflow indication bit is not usable as
	 * the counter value does not wrap, but remains at max value
	 * 0xFFFF.
	 *
	 * In theory and in practice counters can be lost in between
	 * reading nr64() and clearing the counter nw64().  For this
	 * reason, the number of counter clearings nw64() is
	 * limited/reduced though the limit parameter.
	 */
	int rx_channel = rp->rx_channel;
	u32 misc, wred;

	/* RXMISC (Receive Miscellaneous Discard Count), covers the
	 * following discard events: IPP (Input Port Process),
	 * FFLP/TCAM, Full RCR (Receive Completion Ring) RBR (Receive
	 * Block Ring) prefetch buffer is empty.
	 */
	misc = nr64(RXMISC(rx_channel));
	if (unlikely((misc & RXMISC_COUNT) > limit)) {
		nw64(RXMISC(rx_channel), 0);
		rp->rx_errors += misc & RXMISC_COUNT;

		if (unlikely(misc & RXMISC_OFLOW))
			dev_err(np->device, "rx-%d: Counter overflow "
				"RXMISC discard\n", rx_channel);

		niudbg(RX_ERR, "%s-rx-%d: MISC drop=%u over=%u\n",
		       np->dev->name, rx_channel, misc, misc-limit);
	}

	/* WRED (Weighted Random Early Discard) by hardware */
	wred = nr64(RED_DIS_CNT(rx_channel));
	if (unlikely((wred & RED_DIS_CNT_COUNT) > limit)) {
		nw64(RED_DIS_CNT(rx_channel), 0);
		rp->rx_dropped += wred & RED_DIS_CNT_COUNT;

		if (unlikely(wred & RED_DIS_CNT_OFLOW))
			dev_err(np->device, "rx-%d: Counter overflow "
				"WRED discard\n", rx_channel);

		niudbg(RX_ERR, "%s-rx-%d: WRED drop=%u over=%u\n",
		       np->dev->name, rx_channel, wred, wred-limit);
	}
}
3738
/* Process up to 'budget' received packets on one RX channel from NAPI
 * context.  Returns the number of packets handled.
 */
static int niu_rx_work(struct napi_struct *napi, struct niu *np,
		       struct rx_ring_info *rp, int budget)
{
	int qlen, rcr_done = 0, work_done = 0;
	struct rxdma_mailbox *mbox = rp->mbox;
	u64 stat;

	/* Read status and RCR queue length directly from the chip;
	 * the disabled #else branch would use the DMA mailbox copies.
	 */
#if 1
	stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
	qlen = nr64(RCRSTAT_A(rp->rx_channel)) & RCRSTAT_A_QLEN;
#else
	stat = le64_to_cpup(&mbox->rx_dma_ctl_stat);
	qlen = (le64_to_cpup(&mbox->rcrstat_a) & RCRSTAT_A_QLEN);
#endif
	mbox->rx_dma_ctl_stat = 0;
	mbox->rcrstat_a = 0;

	niudbg(RX_STATUS, "%s: niu_rx_work(chan[%d]), stat[%llx] qlen=%d\n",
	       np->dev->name, rp->rx_channel, (unsigned long long) stat, qlen);

	rcr_done = work_done = 0;
	qlen = min(qlen, budget);
	while (work_done < qlen) {
		rcr_done += niu_process_rx_pkt(napi, np, rp);
		work_done++;
	}

	/* Refills are batched; only push pages back to the RBR once
	 * enough empties have accumulated.
	 */
	if (rp->rbr_refill_pending >= rp->rbr_kick_thresh) {
		unsigned int i;

		for (i = 0; i < rp->rbr_refill_pending; i++)
			niu_rbr_refill(np, rp, GFP_ATOMIC);
		rp->rbr_refill_pending = 0;
	}

	/* Ack the work done (packets and RCR entries consumed) and
	 * re-enable mailbox updates (MEX).
	 */
	stat = (RX_DMA_CTL_STAT_MEX |
		((u64)work_done << RX_DMA_CTL_STAT_PKTREAD_SHIFT) |
		((u64)rcr_done << RX_DMA_CTL_STAT_PTRREAD_SHIFT));

	nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat);

	/* Only sync discards stats when qlen indicate potential for drops */
	if (qlen > 10)
		niu_sync_rx_discard_stats(np, rp, 0x7FFF);

	return work_done;
}
3786
/* Core of the NAPI poll for one logical device group: service every
 * TX and RX channel whose summary bit is set in the latched LDSV0
 * value, then unmask (LD_IM0 <- 0) each channel's interrupt again.
 *
 * Returns the total RX packets processed; TX reclaim is not counted
 * against the budget.
 */
static int niu_poll_core(struct niu *np, struct niu_ldg *lp, int budget)
{
	u64 v0 = lp->v0;
	u32 tx_vec = (v0 >> 32);		/* TX channels: high half */
	u32 rx_vec = (v0 & 0xffffffff);		/* RX channels: low half */
	int i, work_done = 0;

	niudbg(INTR, "%s: niu_poll_core() v0[%016llx]\n",
	       np->dev->name, (unsigned long long) v0);

	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &np->tx_rings[i];
		if (tx_vec & (1 << rp->tx_channel))
			niu_tx_work(np, rp);
		/* Re-enable this channel's interrupt. */
		nw64(LD_IM0(LDN_TXDMA(rp->tx_channel)), 0);
	}

	for (i = 0; i < np->num_rx_rings; i++) {
		struct rx_ring_info *rp = &np->rx_rings[i];

		if (rx_vec & (1 << rp->rx_channel)) {
			int this_work_done;

			this_work_done = niu_rx_work(&lp->napi, np, rp,
						     budget);

			budget -= this_work_done;
			work_done += this_work_done;
		}
		/* Re-enable this channel's interrupt. */
		nw64(LD_IM0(LDN_RXDMA(rp->rx_channel)), 0);
	}

	return work_done;
}
3821
3822 static int niu_poll(struct napi_struct *napi, int budget)
3823 {
3824         struct niu_ldg *lp = container_of(napi, struct niu_ldg, napi);
3825         struct niu *np = lp->np;
3826         int work_done;
3827
3828         work_done = niu_poll_core(np, lp, budget);
3829
3830         if (work_done < budget) {
3831                 napi_complete(napi);
3832                 niu_ldg_rearm(np, lp, 1);
3833         }
3834         return work_done;
3835 }
3836
3837 static void niu_log_rxchan_errors(struct niu *np, struct rx_ring_info *rp,
3838                                   u64 stat)
3839 {
3840         dev_err(np->device, PFX "%s: RX channel %u errors ( ",
3841                 np->dev->name, rp->rx_channel);
3842
3843         if (stat & RX_DMA_CTL_STAT_RBR_TMOUT)
3844                 printk("RBR_TMOUT ");
3845         if (stat & RX_DMA_CTL_STAT_RSP_CNT_ERR)
3846                 printk("RSP_CNT ");
3847         if (stat & RX_DMA_CTL_STAT_BYTE_EN_BUS)
3848                 printk("BYTE_EN_BUS ");
3849         if (stat & RX_DMA_CTL_STAT_RSP_DAT_ERR)
3850                 printk("RSP_DAT ");
3851         if (stat & RX_DMA_CTL_STAT_RCR_ACK_ERR)
3852                 printk("RCR_ACK ");
3853         if (stat & RX_DMA_CTL_STAT_RCR_SHA_PAR)
3854                 printk("RCR_SHA_PAR ");
3855         if (stat & RX_DMA_CTL_STAT_RBR_PRE_PAR)
3856                 printk("RBR_PRE_PAR ");
3857         if (stat & RX_DMA_CTL_STAT_CONFIG_ERR)
3858                 printk("CONFIG ");
3859         if (stat & RX_DMA_CTL_STAT_RCRINCON)
3860                 printk("RCRINCON ");
3861         if (stat & RX_DMA_CTL_STAT_RCRFULL)
3862                 printk("RCRFULL ");
3863         if (stat & RX_DMA_CTL_STAT_RBRFULL)
3864                 printk("RBRFULL ");
3865         if (stat & RX_DMA_CTL_STAT_RBRLOGPAGE)
3866                 printk("RBRLOGPAGE ");
3867         if (stat & RX_DMA_CTL_STAT_CFIGLOGPAGE)
3868                 printk("CFIGLOGPAGE ");
3869         if (stat & RX_DMA_CTL_STAT_DC_FIFO_ERR)
3870                 printk("DC_FIDO ");
3871
3872         printk(")\n");
3873 }
3874
3875 static int niu_rx_error(struct niu *np, struct rx_ring_info *rp)
3876 {
3877         u64 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
3878         int err = 0;
3879
3880
3881         if (stat & (RX_DMA_CTL_STAT_CHAN_FATAL |
3882                     RX_DMA_CTL_STAT_PORT_FATAL))
3883                 err = -EINVAL;
3884
3885         if (err) {
3886                 dev_err(np->device, PFX "%s: RX channel %u error, stat[%llx]\n",
3887                         np->dev->name, rp->rx_channel,
3888                         (unsigned long long) stat);
3889
3890                 niu_log_rxchan_errors(np, rp, stat);
3891         }
3892
3893         nw64(RX_DMA_CTL_STAT(rp->rx_channel),
3894              stat & RX_DMA_CTL_WRITE_CLEAR_ERRS);
3895
3896         return err;
3897 }
3898
3899 static void niu_log_txchan_errors(struct niu *np, struct tx_ring_info *rp,
3900                                   u64 cs)
3901 {
3902         dev_err(np->device, PFX "%s: TX channel %u errors ( ",
3903                 np->dev->name, rp->tx_channel);
3904
3905         if (cs & TX_CS_MBOX_ERR)
3906                 printk("MBOX ");
3907         if (cs & TX_CS_PKT_SIZE_ERR)
3908                 printk("PKT_SIZE ");
3909         if (cs & TX_CS_TX_RING_OFLOW)
3910                 printk("TX_RING_OFLOW ");
3911         if (cs & TX_CS_PREF_BUF_PAR_ERR)
3912                 printk("PREF_BUF_PAR ");
3913         if (cs & TX_CS_NACK_PREF)
3914                 printk("NACK_PREF ");
3915         if (cs & TX_CS_NACK_PKT_RD)
3916                 printk("NACK_PKT_RD ");
3917         if (cs & TX_CS_CONF_PART_ERR)
3918                 printk("CONF_PART ");
3919         if (cs & TX_CS_PKT_PRT_ERR)
3920                 printk("PKT_PTR ");
3921
3922         printk(")\n");
3923 }
3924
3925 static int niu_tx_error(struct niu *np, struct tx_ring_info *rp)
3926 {
3927         u64 cs, logh, logl;
3928
3929         cs = nr64(TX_CS(rp->tx_channel));
3930         logh = nr64(TX_RNG_ERR_LOGH(rp->tx_channel));
3931         logl = nr64(TX_RNG_ERR_LOGL(rp->tx_channel));
3932
3933         dev_err(np->device, PFX "%s: TX channel %u error, "
3934                 "cs[%llx] logh[%llx] logl[%llx]\n",
3935                 np->dev->name, rp->tx_channel,
3936                 (unsigned long long) cs,
3937                 (unsigned long long) logh,
3938                 (unsigned long long) logl);
3939
3940         niu_log_txchan_errors(np, rp, cs);
3941
3942         return -ENODEV;
3943 }
3944
3945 static int niu_mif_interrupt(struct niu *np)
3946 {
3947         u64 mif_status = nr64(MIF_STATUS);
3948         int phy_mdint = 0;
3949
3950         if (np->flags & NIU_FLAGS_XMAC) {
3951                 u64 xrxmac_stat = nr64_mac(XRXMAC_STATUS);
3952
3953                 if (xrxmac_stat & XRXMAC_STATUS_PHY_MDINT)
3954                         phy_mdint = 1;
3955         }
3956
3957         dev_err(np->device, PFX "%s: MIF interrupt, "
3958                 "stat[%llx] phy_mdint(%d)\n",
3959                 np->dev->name, (unsigned long long) mif_status, phy_mdint);
3960
3961         return -ENODEV;
3962 }
3963
3964 static void niu_xmac_interrupt(struct niu *np)
3965 {
3966         struct niu_xmac_stats *mp = &np->mac_stats.xmac;
3967         u64 val;
3968
3969         val = nr64_mac(XTXMAC_STATUS);
3970         if (val & XTXMAC_STATUS_FRAME_CNT_EXP)
3971                 mp->tx_frames += TXMAC_FRM_CNT_COUNT;
3972         if (val & XTXMAC_STATUS_BYTE_CNT_EXP)
3973                 mp->tx_bytes += TXMAC_BYTE_CNT_COUNT;
3974         if (val & XTXMAC_STATUS_TXFIFO_XFR_ERR)
3975                 mp->tx_fifo_errors++;
3976         if (val & XTXMAC_STATUS_TXMAC_OFLOW)
3977                 mp->tx_overflow_errors++;
3978         if (val & XTXMAC_STATUS_MAX_PSIZE_ERR)
3979                 mp->tx_max_pkt_size_errors++;
3980         if (val & XTXMAC_STATUS_TXMAC_UFLOW)
3981                 mp->tx_underflow_errors++;
3982
3983         val = nr64_mac(XRXMAC_STATUS);
3984         if (val & XRXMAC_STATUS_LCL_FLT_STATUS)
3985                 mp->rx_local_faults++;
3986         if (val & XRXMAC_STATUS_RFLT_DET)
3987                 mp->rx_remote_faults++;
3988         if (val & XRXMAC_STATUS_LFLT_CNT_EXP)
3989                 mp->rx_link_faults += LINK_FAULT_CNT_COUNT;
3990         if (val & XRXMAC_STATUS_ALIGNERR_CNT_EXP)
3991                 mp->rx_align_errors += RXMAC_ALIGN_ERR_CNT_COUNT;
3992         if (val & XRXMAC_STATUS_RXFRAG_CNT_EXP)
3993                 mp->rx_frags += RXMAC_FRAG_CNT_COUNT;
3994         if (val & XRXMAC_STATUS_RXMULTF_CNT_EXP)
3995                 mp->rx_mcasts += RXMAC_MC_FRM_CNT_COUNT;
3996         if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP)
3997                 mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT;
3998         if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP)
3999                 mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT;
4000         if (val & XRXMAC_STATUS_RXHIST1_CNT_EXP)
4001                 mp->rx_hist_cnt1 += RXMAC_HIST_CNT1_COUNT;
4002         if (val & XRXMAC_STATUS_RXHIST2_CNT_EXP)
4003                 mp->rx_hist_cnt2 += RXMAC_HIST_CNT2_COUNT;
4004         if (val & XRXMAC_STATUS_RXHIST3_CNT_EXP)
4005                 mp->rx_hist_cnt3 += RXMAC_HIST_CNT3_COUNT;
4006         if (val & XRXMAC_STATUS_RXHIST4_CNT_EXP)
4007                 mp->rx_hist_cnt4 += RXMAC_HIST_CNT4_COUNT;
4008         if (val & XRXMAC_STATUS_RXHIST5_CNT_EXP)
4009                 mp->rx_hist_cnt5 += RXMAC_HIST_CNT5_COUNT;
4010         if (val & XRXMAC_STATUS_RXHIST6_CNT_EXP)
4011                 mp->rx_hist_cnt6 += RXMAC_HIST_CNT6_COUNT;
4012         if (val & XRXMAC_STATUS_RXHIST7_CNT_EXP)
4013                 mp->rx_hist_cnt7 += RXMAC_HIST_CNT7_COUNT;
4014         if (val & XRXMAC_STATUS_RXOCTET_CNT_EXP)
4015                 mp->rx_octets += RXMAC_BT_CNT_COUNT;
4016         if (val & XRXMAC_STATUS_CVIOLERR_CNT_EXP)
4017                 mp->rx_code_violations += RXMAC_CD_VIO_CNT_COUNT;
4018         if (val & XRXMAC_STATUS_LENERR_CNT_EXP)
4019                 mp->rx_len_errors += RXMAC_MPSZER_CNT_COUNT;
4020         if (val & XRXMAC_STATUS_CRCERR_CNT_EXP)
4021                 mp->rx_crc_errors += RXMAC_CRC_ER_CNT_COUNT;
4022         if (val & XRXMAC_STATUS_RXUFLOW)
4023                 mp->rx_underflows++;
4024         if (val & XRXMAC_STATUS_RXOFLOW)
4025                 mp->rx_overflows++;
4026
4027         val = nr64_mac(XMAC_FC_STAT);
4028         if (val & XMAC_FC_STAT_TX_MAC_NPAUSE)
4029                 mp->pause_off_state++;
4030         if (val & XMAC_FC_STAT_TX_MAC_PAUSE)
4031                 mp->pause_on_state++;
4032         if (val & XMAC_FC_STAT_RX_MAC_RPAUSE)
4033                 mp->pause_received++;
4034 }
4035
/* Service a BMAC status interrupt: fold the expired (wrapped)
 * hardware counters into the software MAC statistics for this port.
 */
static void niu_bmac_interrupt(struct niu *np)
{
	struct niu_bmac_stats *mp = &np->mac_stats.bmac;
	u64 val;

	val = nr64_mac(BTXMAC_STATUS);
	if (val & BTXMAC_STATUS_UNDERRUN)
		mp->tx_underflow_errors++;
	if (val & BTXMAC_STATUS_MAX_PKT_ERR)
		mp->tx_max_pkt_size_errors++;
	if (val & BTXMAC_STATUS_BYTE_CNT_EXP)
		mp->tx_bytes += BTXMAC_BYTE_CNT_COUNT;
	if (val & BTXMAC_STATUS_FRAME_CNT_EXP)
		mp->tx_frames += BTXMAC_FRM_CNT_COUNT;

	val = nr64_mac(BRXMAC_STATUS);
	if (val & BRXMAC_STATUS_OVERFLOW)
		mp->rx_overflows++;
	if (val & BRXMAC_STATUS_FRAME_CNT_EXP)
		mp->rx_frames += BRXMAC_FRAME_CNT_COUNT;
	if (val & BRXMAC_STATUS_ALIGN_ERR_EXP)
		mp->rx_align_errors += BRXMAC_ALIGN_ERR_CNT_COUNT;
	/* NOTE(review): CRC errors are accumulated with the ALIGN_ERR
	 * count constant — looks like a copy/paste; confirm against the
	 * BMAC register definitions before changing.
	 */
	if (val & BRXMAC_STATUS_CRC_ERR_EXP)
		mp->rx_crc_errors += BRXMAC_ALIGN_ERR_CNT_COUNT;
	if (val & BRXMAC_STATUS_LEN_ERR_EXP)
		mp->rx_len_errors += BRXMAC_CODE_VIOL_ERR_CNT_COUNT;

	val = nr64_mac(BMAC_CTRL_STATUS);
	if (val & BMAC_CTRL_STATUS_NOPAUSE)
		mp->pause_off_state++;
	if (val & BMAC_CTRL_STATUS_PAUSE)
		mp->pause_on_state++;
	if (val & BMAC_CTRL_STATUS_PAUSE_RECV)
		mp->pause_received++;
}
4071
4072 static int niu_mac_interrupt(struct niu *np)
4073 {
4074         if (np->flags & NIU_FLAGS_XMAC)
4075                 niu_xmac_interrupt(np);
4076         else
4077                 niu_bmac_interrupt(np);
4078
4079         return 0;
4080 }
4081
4082 static void niu_log_device_error(struct niu *np, u64 stat)
4083 {
4084         dev_err(np->device, PFX "%s: Core device errors ( ",
4085                 np->dev->name);
4086
4087         if (stat & SYS_ERR_MASK_META2)
4088                 printk("META2 ");
4089         if (stat & SYS_ERR_MASK_META1)
4090                 printk("META1 ");
4091         if (stat & SYS_ERR_MASK_PEU)
4092                 printk("PEU ");
4093         if (stat & SYS_ERR_MASK_TXC)
4094                 printk("TXC ");
4095         if (stat & SYS_ERR_MASK_RDMC)
4096                 printk("RDMC ");
4097         if (stat & SYS_ERR_MASK_TDMC)
4098                 printk("TDMC ");
4099         if (stat & SYS_ERR_MASK_ZCP)
4100                 printk("ZCP ");
4101         if (stat & SYS_ERR_MASK_FFLP)
4102                 printk("FFLP ");
4103         if (stat & SYS_ERR_MASK_IPP)
4104                 printk("IPP ");
4105         if (stat & SYS_ERR_MASK_MAC)
4106                 printk("MAC ");
4107         if (stat & SYS_ERR_MASK_SMX)
4108                 printk("SMX ");
4109
4110         printk(")\n");
4111 }
4112
4113 static int niu_device_error(struct niu *np)
4114 {
4115         u64 stat = nr64(SYS_ERR_STAT);
4116
4117         dev_err(np->device, PFX "%s: Core device error, stat[%llx]\n",
4118                 np->dev->name, (unsigned long long) stat);
4119
4120         niu_log_device_error(np, stat);
4121
4122         return -ENODEV;
4123 }
4124
/* Handle the slow (error) interrupt sources latched in LDSV1/LDSV2.
 *
 * v1 bits 0-31 are RX channel error summaries, bits 32-62 TX channel
 * error summaries; bit 63 of v0/v1 signals a MIF event.  v2 carries
 * the MAC (0x01ef) and core device (0x0210) error summaries.  On any
 * fatal error the chip interrupts are disabled and the error code is
 * returned to the caller.
 */
static int niu_slowpath_interrupt(struct niu *np, struct niu_ldg *lp,
			      u64 v0, u64 v1, u64 v2)
{

	int i, err = 0;

	/* Preserve the latched state for the NAPI poll path. */
	lp->v0 = v0;
	lp->v1 = v1;
	lp->v2 = v2;

	if (v1 & 0x00000000ffffffffULL) {
		u32 rx_vec = (v1 & 0xffffffff);

		for (i = 0; i < np->num_rx_rings; i++) {
			struct rx_ring_info *rp = &np->rx_rings[i];

			if (rx_vec & (1 << rp->rx_channel)) {
				int r = niu_rx_error(np, rp);
				if (r) {
					err = r;
				} else {
					/* Non-fatal: re-enable mailbox
					 * updates if no fast-path work
					 * is pending in v0.
					 */
					if (!v0)
						nw64(RX_DMA_CTL_STAT(rp->rx_channel),
						     RX_DMA_CTL_STAT_MEX);
				}
			}
		}
	}
	if (v1 & 0x7fffffff00000000ULL) {
		u32 tx_vec = (v1 >> 32) & 0x7fffffff;

		for (i = 0; i < np->num_tx_rings; i++) {
			struct tx_ring_info *rp = &np->tx_rings[i];

			if (tx_vec & (1 << rp->tx_channel)) {
				int r = niu_tx_error(np, rp);
				if (r)
					err = r;
			}
		}
	}
	if ((v0 | v1) & 0x8000000000000000ULL) {
		int r = niu_mif_interrupt(np);
		if (r)
			err = r;
	}
	if (v2) {
		if (v2 & 0x01ef) {
			int r = niu_mac_interrupt(np);
			if (r)
				err = r;
		}
		if (v2 & 0x0210) {
			int r = niu_device_error(np);
			if (r)
				err = r;
		}
	}

	/* Any fatal error: mask the chip until recovery tears down. */
	if (err)
		niu_enable_interrupts(np, 0);

	return err;
}
4189
4190 static void niu_rxchan_intr(struct niu *np, struct rx_ring_info *rp,
4191                             int ldn)
4192 {
4193         struct rxdma_mailbox *mbox = rp->mbox;
4194         u64 stat_write, stat = le64_to_cpup(&mbox->rx_dma_ctl_stat);
4195
4196         stat_write = (RX_DMA_CTL_STAT_RCRTHRES |
4197                       RX_DMA_CTL_STAT_RCRTO);
4198         nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat_write);
4199
4200         niudbg(INTR, "%s: rxchan_intr stat[%llx]\n",
4201                np->dev->name, (unsigned long long) stat);
4202 }
4203
/* Latch the TX channel's control/status word for niu_tx_work() to
 * consume later from NAPI context.  The 'ldn' parameter is currently
 * unused (kept for symmetry with niu_rxchan_intr()).
 */
static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp,
			    int ldn)
{
	rp->tx_cs = nr64(TX_CS(rp->tx_channel));

	niudbg(INTR, "%s: txchan_intr cs[%llx]\n",
	       np->dev->name, (unsigned long long) rp->tx_cs);
}
4212
/* Interrupt fast path for one LDG: for every RX/TX channel mapped to
 * this group, mask its logical device interrupt (it is re-enabled by
 * niu_poll_core()) and, if its summary bit is set in v0, latch the
 * per-channel status for the NAPI handler to consume.
 */
static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0)
{
	struct niu_parent *parent = np->parent;
	u32 rx_vec, tx_vec;
	int i;

	tx_vec = (v0 >> 32);
	rx_vec = (v0 & 0xffffffff);

	for (i = 0; i < np->num_rx_rings; i++) {
		struct rx_ring_info *rp = &np->rx_rings[i];
		int ldn = LDN_RXDMA(rp->rx_channel);

		/* Skip channels owned by a different LDG. */
		if (parent->ldg_map[ldn] != ldg)
			continue;

		nw64(LD_IM0(ldn), LD_IM0_MASK);
		if (rx_vec & (1 << rp->rx_channel))
			niu_rxchan_intr(np, rp, ldn);
	}

	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &np->tx_rings[i];
		int ldn = LDN_TXDMA(rp->tx_channel);

		/* Skip channels owned by a different LDG. */
		if (parent->ldg_map[ldn] != ldg)
			continue;

		nw64(LD_IM0(ldn), LD_IM0_MASK);
		if (tx_vec & (1 << rp->tx_channel))
			niu_txchan_intr(np, rp, ldn);
	}
}
4246
/* Kick NAPI for this LDG.  If a poll is not already scheduled, stash
 * the latched state vectors, mask and latch the per-channel state
 * (__niu_fastpath_interrupt), then schedule the poll.  The state must
 * be stored before __napi_schedule() so the poll routine sees a
 * consistent snapshot.
 */
static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp,
			      u64 v0, u64 v1, u64 v2)
{
	if (likely(napi_schedule_prep(&lp->napi))) {
		lp->v0 = v0;
		lp->v1 = v1;
		lp->v2 = v2;
		__niu_fastpath_interrupt(np, lp->ldg_num, v0);
		__napi_schedule(&lp->napi);
	}
}
4258
/* Top-level hard interrupt handler for one logical device group.
 * Reads the three logical device state vectors under the device lock,
 * routes error sources through the slow path, and defers normal RX/TX
 * servicing to NAPI.
 */
static irqreturn_t niu_interrupt(int irq, void *dev_id)
{
	struct niu_ldg *lp = dev_id;
	struct niu *np = lp->np;
	int ldg = lp->ldg_num;
	unsigned long flags;
	u64 v0, v1, v2;

	if (netif_msg_intr(np))
		printk(KERN_DEBUG PFX "niu_interrupt() ldg[%p](%d) ",
		       lp, ldg);

	spin_lock_irqsave(&np->lock, flags);

	v0 = nr64(LDSV0(ldg));
	v1 = nr64(LDSV1(ldg));
	v2 = nr64(LDSV2(ldg));

	if (netif_msg_intr(np))
		printk("v0[%llx] v1[%llx] v2[%llx]\n",
		       (unsigned long long) v0,
		       (unsigned long long) v1,
		       (unsigned long long) v2);

	/* Spurious interrupt: no state bits set anywhere. */
	if (unlikely(!v0 && !v1 && !v2)) {
		spin_unlock_irqrestore(&np->lock, flags);
		return IRQ_NONE;
	}

	/* MIF events and anything in v1/v2 take the slow (error) path. */
	if (unlikely((v0 & ((u64)1 << LDN_MIF)) || v1 || v2)) {
		int err = niu_slowpath_interrupt(np, lp, v0, v1, v2);
		if (err)
			goto out;
	}
	/* Normal RX/TX work is deferred to NAPI; otherwise just re-arm. */
	if (likely(v0 & ~((u64)1 << LDN_MIF)))
		niu_schedule_napi(np, lp, v0, v1, v2);
	else
		niu_ldg_rearm(np, lp, 1);
out:
	spin_unlock_irqrestore(&np->lock, flags);

	return IRQ_HANDLED;
}
4302
4303 static void niu_free_rx_ring_info(struct niu *np, struct rx_ring_info *rp)
4304 {
4305         if (rp->mbox) {
4306                 np->ops->free_coherent(np->device,
4307                                        sizeof(struct rxdma_mailbox),
4308                                        rp->mbox, rp->mbox_dma);
4309                 rp->mbox = NULL;
4310         }
4311         if (rp->rcr) {
4312                 np->ops->free_coherent(np->device,
4313                                        MAX_RCR_RING_SIZE * sizeof(__le64),
4314                                        rp->rcr, rp->rcr_dma);
4315                 rp->rcr = NULL;
4316                 rp->rcr_table_size = 0;
4317                 rp->rcr_index = 0;
4318         }
4319         if (rp->rbr) {
4320                 niu_rbr_free(np, rp);
4321
4322                 np->ops->free_coherent(np->device,
4323                                        MAX_RBR_RING_SIZE * sizeof(__le32),
4324                                        rp->rbr, rp->rbr_dma);
4325                 rp->rbr = NULL;
4326                 rp->rbr_table_size = 0;
4327                 rp->rbr_index = 0;
4328         }
4329         kfree(rp->rxhash);
4330         rp->rxhash = NULL;
4331 }
4332
4333 static void niu_free_tx_ring_info(struct niu *np, struct tx_ring_info *rp)
4334 {
4335         if (rp->mbox) {
4336                 np->ops->free_coherent(np->device,
4337                                        sizeof(struct txdma_mailbox),
4338                                        rp->mbox, rp->mbox_dma);
4339                 rp->mbox = NULL;
4340         }
4341         if (rp->descr) {
4342                 int i;
4343
4344                 for (i = 0; i < MAX_TX_RING_SIZE; i++) {
4345                         if (rp->tx_buffs[i].skb)
4346                                 (void) release_tx_packet(np, rp, i);
4347                 }
4348
4349                 np->ops->free_coherent(np->device,
4350                                        MAX_TX_RING_SIZE * sizeof(__le64),
4351                                        rp->descr, rp->descr_dma);
4352                 rp->descr = NULL;
4353                 rp->pending = 0;
4354                 rp->prod = 0;
4355                 rp->cons = 0;
4356                 rp->wrap_bit = 0;
4357         }
4358 }
4359
4360 static void niu_free_channels(struct niu *np)
4361 {
4362         int i;
4363
4364         if (np->rx_rings) {
4365                 for (i = 0; i < np->num_rx_rings; i++) {
4366                         struct rx_ring_info *rp = &np->rx_rings[i];
4367
4368                         niu_free_rx_ring_info(np, rp);
4369                 }
4370                 kfree(np->rx_rings);
4371                 np->rx_rings = NULL;
4372                 np->num_rx_rings = 0;
4373         }
4374
4375         if (np->tx_rings) {
4376                 for (i = 0; i < np->num_tx_rings; i++) {
4377                         struct tx_ring_info *rp = &np->tx_rings[i];
4378
4379                         niu_free_tx_ring_info(np, rp);
4380                 }
4381                 kfree(np->tx_rings);
4382                 np->tx_rings = NULL;
4383                 np->num_tx_rings = 0;
4384         }
4385 }
4386
/* Allocate the per-channel RX resources: the page hash table, the
 * RXDMA mailbox, the RCR (completion) ring and the RBR (buffer) ring.
 * All coherent allocations must be 64-byte aligned for the chip.
 *
 * On failure, partially-allocated resources are left in place; the
 * caller (niu_alloc_channels) releases them via niu_free_channels().
 */
static int niu_alloc_rx_ring_info(struct niu *np,
				  struct rx_ring_info *rp)
{
	/* The hardware mailbox layout must be exactly 64 bytes. */
	BUILD_BUG_ON(sizeof(struct rxdma_mailbox) != 64);

	/* One struct page pointer per possible RBR entry. */
	rp->rxhash = kzalloc(MAX_RBR_RING_SIZE * sizeof(struct page *),
			     GFP_KERNEL);
	if (!rp->rxhash)
		return -ENOMEM;

	rp->mbox = np->ops->alloc_coherent(np->device,
					   sizeof(struct rxdma_mailbox),
					   &rp->mbox_dma, GFP_KERNEL);
	if (!rp->mbox)
		return -ENOMEM;
	/* The chip requires 64-byte alignment for the mailbox. */
	if ((unsigned long)rp->mbox & (64UL - 1)) {
		dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
			"RXDMA mailbox %p\n", np->dev->name, rp->mbox);
		return -EINVAL;
	}

	rp->rcr = np->ops->alloc_coherent(np->device,
					  MAX_RCR_RING_SIZE * sizeof(__le64),
					  &rp->rcr_dma, GFP_KERNEL);
	if (!rp->rcr)
		return -ENOMEM;
	/* RCR table must also be 64-byte aligned. */
	if ((unsigned long)rp->rcr & (64UL - 1)) {
		dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
			"RXDMA RCR table %p\n", np->dev->name, rp->rcr);
		return -EINVAL;
	}
	rp->rcr_table_size = MAX_RCR_RING_SIZE;
	rp->rcr_index = 0;

	rp->rbr = np->ops->alloc_coherent(np->device,
					  MAX_RBR_RING_SIZE * sizeof(__le32),
					  &rp->rbr_dma, GFP_KERNEL);
	if (!rp->rbr)
		return -ENOMEM;
	/* RBR table must also be 64-byte aligned. */
	if ((unsigned long)rp->rbr & (64UL - 1)) {
		dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
			"RXDMA RBR table %p\n", np->dev->name, rp->rbr);
		return -EINVAL;
	}
	rp->rbr_table_size = MAX_RBR_RING_SIZE;
	rp->rbr_index = 0;
	rp->rbr_pending = 0;

	return 0;
}
4437
4438 static void niu_set_max_burst(struct niu *np, struct tx_ring_info *rp)
4439 {
4440         int mtu = np->dev->mtu;
4441
4442         /* These values are recommended by the HW designers for fair
4443          * utilization of DRR amongst the rings.
4444          */
4445         rp->max_burst = mtu + 32;
4446         if (rp->max_burst > 4096)
4447                 rp->max_burst = 4096;
4448 }
4449
/* Allocate the per-channel TX resources: the TXDMA mailbox and the
 * descriptor ring.  Both coherent allocations must be 64-byte
 * aligned.  On failure, partially-allocated resources are released
 * by the caller via niu_free_channels().
 */
static int niu_alloc_tx_ring_info(struct niu *np,
				  struct tx_ring_info *rp)
{
	/* The hardware mailbox layout must be exactly 64 bytes. */
	BUILD_BUG_ON(sizeof(struct txdma_mailbox) != 64);

	rp->mbox = np->ops->alloc_coherent(np->device,
					   sizeof(struct txdma_mailbox),
					   &rp->mbox_dma, GFP_KERNEL);
	if (!rp->mbox)
		return -ENOMEM;
	/* The chip requires 64-byte alignment for the mailbox. */
	if ((unsigned long)rp->mbox & (64UL - 1)) {
		dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
			"TXDMA mailbox %p\n", np->dev->name, rp->mbox);
		return -EINVAL;
	}

	rp->descr = np->ops->alloc_coherent(np->device,
					    MAX_TX_RING_SIZE * sizeof(__le64),
					    &rp->descr_dma, GFP_KERNEL);
	if (!rp->descr)
		return -ENOMEM;
	/* Descriptor ring must also be 64-byte aligned. */
	if ((unsigned long)rp->descr & (64UL - 1)) {
		dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
			"TXDMA descr table %p\n", np->dev->name, rp->descr);
		return -EINVAL;
	}

	/* Start with a full, empty ring. */
	rp->pending = MAX_TX_RING_SIZE;
	rp->prod = 0;
	rp->cons = 0;
	rp->wrap_bit = 0;

	/* XXX make these configurable... XXX */
	rp->mark_freq = rp->pending / 4;

	niu_set_max_burst(np, rp);

	return 0;
}
4489
4490 static void niu_size_rbr(struct niu *np, struct rx_ring_info *rp)
4491 {
4492         u16 bss;
4493
4494         bss = min(PAGE_SHIFT, 15);
4495
4496         rp->rbr_block_size = 1 << bss;
4497         rp->rbr_blocks_per_page = 1 << (PAGE_SHIFT-bss);
4498
4499         rp->rbr_sizes[0] = 256;
4500         rp->rbr_sizes[1] = 1024;
4501         if (np->dev->mtu > ETH_DATA_LEN) {
4502                 switch (PAGE_SIZE) {
4503                 case 4 * 1024:
4504                         rp->rbr_sizes[2] = 4096;
4505                         break;
4506
4507                 default:
4508                         rp->rbr_sizes[2] = 8192;
4509                         break;
4510                 }
4511         } else {
4512                 rp->rbr_sizes[2] = 2048;
4513         }
4514         rp->rbr_sizes[3] = rp->rbr_block_size;
4515 }
4516
4517 static int niu_alloc_channels(struct niu *np)
4518 {
4519         struct niu_parent *parent = np->parent;
4520         int first_rx_channel, first_tx_channel;
4521         int i, port, err;
4522
4523         port = np->port;
4524         first_rx_channel = first_tx_channel = 0;
4525         for (i = 0; i < port; i++) {
4526                 first_rx_channel += parent->rxchan_per_port[i];
4527                 first_tx_channel += parent->txchan_per_port[i];
4528         }
4529
4530         np->num_rx_rings = parent->rxchan_per_port[port];
4531         np->num_tx_rings = parent->txchan_per_port[port];
4532
4533         np->dev->real_num_tx_queues = np->num_tx_rings;
4534
4535         np->rx_rings = kzalloc(np->num_rx_rings * sizeof(struct rx_ring_info),
4536                                GFP_KERNEL);
4537         err = -ENOMEM;
4538         if (!np->rx_rings)
4539                 goto out_err;
4540
4541         for (i = 0; i < np->num_rx_rings; i++) {
4542                 struct rx_ring_info *rp = &np->rx_rings[i];
4543
4544                 rp->np = np;
4545                 rp->rx_channel = first_rx_channel + i;
4546
4547                 err = niu_alloc_rx_ring_info(np, rp);
4548                 if (err)
4549                         goto out_err;
4550
4551                 niu_size_rbr(np, rp);
4552
4553                 /* XXX better defaults, configurable, etc... XXX */
4554                 rp->nonsyn_window = 64;
4555                 rp->nonsyn_threshold = rp->rcr_table_size - 64;
4556                 rp->syn_window = 64;
4557                 rp->syn_threshold = rp->rcr_table_size - 64;
4558                 rp->rcr_pkt_threshold = 16;
4559                 rp->rcr_timeout = 8;
4560                 rp->rbr_kick_thresh = RBR_REFILL_MIN;
4561                 if (rp->rbr_kick_thresh < rp->rbr_blocks_per_page)
4562                         rp->rbr_kick_thresh = rp->rbr_blocks_per_page;
4563
4564                 err = niu_rbr_fill(np, rp, GFP_KERNEL);
4565                 if (err)
4566                         return err;
4567         }
4568
4569         np->tx_rings = kzalloc(np->num_tx_rings * sizeof(struct tx_ring_info),
4570                                GFP_KERNEL);
4571         err = -ENOMEM;
4572         if (!np->tx_rings)
4573                 goto out_err;
4574
4575         for (i = 0; i < np->num_tx_rings; i++) {
4576                 struct tx_ring_info *rp = &np->tx_rings[i];
4577
4578                 rp->np = np;
4579                 rp->tx_channel = first_tx_channel + i;
4580
4581                 err = niu_alloc_tx_ring_info(np, rp);
4582                 if (err)
4583                         goto out_err;
4584         }
4585
4586         return 0;
4587
4588 out_err:
4589         niu_free_channels(np);
4590         return err;
4591 }
4592
4593 static int niu_tx_cs_sng_poll(struct niu *np, int channel)
4594 {
4595         int limit = 1000;
4596
4597         while (--limit > 0) {
4598                 u64 val = nr64(TX_CS(channel));
4599                 if (val & TX_CS_SNG_STATE)
4600                         return 0;
4601         }
4602         return -ENODEV;
4603 }
4604
4605 static int niu_tx_channel_stop(struct niu *np, int channel)
4606 {
4607         u64 val = nr64(TX_CS(channel));
4608
4609         val |= TX_CS_STOP_N_GO;
4610         nw64(TX_CS(channel), val);
4611
4612         return niu_tx_cs_sng_poll(np, channel);
4613 }
4614
4615 static int niu_tx_cs_reset_poll(struct niu *np, int channel)
4616 {
4617         int limit = 1000;
4618
4619         while (--limit > 0) {
4620                 u64 val = nr64(TX_CS(channel));
4621                 if (!(val & TX_CS_RST))
4622                         return 0;
4623         }
4624         return -ENODEV;
4625 }
4626
4627 static int niu_tx_channel_reset(struct niu *np, int channel)
4628 {
4629         u64 val = nr64(TX_CS(channel));
4630         int err;
4631
4632         val |= TX_CS_RST;
4633         nw64(TX_CS(channel), val);
4634
4635         err = niu_tx_cs_reset_poll(np, channel);
4636         if (!err)
4637                 nw64(TX_RING_KICK(channel), 0);
4638
4639         return err;
4640 }
4641
/* Initialize TX logical pages for @channel: clear all mask, value,
 * relocation and handle registers, then mark both logical pages
 * valid for this port's function number.
 */
static int niu_tx_channel_lpage_init(struct niu *np, int channel)
{
	u64 val;

	nw64(TX_LOG_MASK1(channel), 0);
	nw64(TX_LOG_VAL1(channel), 0);
	nw64(TX_LOG_MASK2(channel), 0);
	nw64(TX_LOG_VAL2(channel), 0);
	nw64(TX_LOG_PAGE_RELO1(channel), 0);
	nw64(TX_LOG_PAGE_RELO2(channel), 0);
	nw64(TX_LOG_PAGE_HDL(channel), 0);

	/* Associate both pages with this port's function number. */
	val  = (u64)np->port << TX_LOG_PAGE_VLD_FUNC_SHIFT;
	val |= (TX_LOG_PAGE_VLD_PAGE0 | TX_LOG_PAGE_VLD_PAGE1);
	nw64(TX_LOG_PAGE_VLD(channel), val);

	/* XXX TXDMA 32bit mode? XXX */

	return 0;
}
4662
/* Enable or disable this port's bit in the shared TXC_CONTROL
 * register.  The register is shared across ports, so the RMW is done
 * under the parent lock.  When disabling, the global enable bit is
 * also cleared once no per-port bits remain set.
 */
static void niu_txc_enable_port(struct niu *np, int on)
{
	unsigned long flags;
	u64 val, mask;

	niu_lock_parent(np, flags);
	val = nr64(TXC_CONTROL);
	mask = (u64)1 << np->port;
	if (on) {
		val |= TXC_CONTROL_ENABLE | mask;
	} else {
		val &= ~mask;
		/* Turn the engine off when the last port goes away. */
		if ((val & ~TXC_CONTROL_ENABLE) == 0)
			val &= ~TXC_CONTROL_ENABLE;
	}
	nw64(TXC_CONTROL, val);
	niu_unlock_parent(np, flags);
}
4681
4682 static void niu_txc_set_imask(struct niu *np, u64 imask)
4683 {
4684         unsigned long flags;
4685         u64 val;
4686
4687         niu_lock_parent(np, flags);
4688         val = nr64(TXC_INT_MASK);
4689         val &= ~TXC_INT_MASK_VAL(np->port);
4690         val |= (imask << TXC_INT_MASK_VAL_SHIFT(np->port));
4691         niu_unlock_parent(np, flags);
4692 }
4693
4694 static void niu_txc_port_dma_enable(struct niu *np, int on)
4695 {
4696         u64 val = 0;
4697
4698         if (on) {
4699                 int i;
4700
4701                 for (i = 0; i < np->num_tx_rings; i++)
4702                         val |= (1 << np->tx_rings[i].tx_channel);
4703         }
4704         nw64(TXC_PORT_DMA(np->port), val);
4705 }
4706
4707 static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
4708 {
4709         int err, channel = rp->tx_channel;
4710         u64 val, ring_len;
4711
4712         err = niu_tx_channel_stop(np, channel);
4713         if (err)
4714                 return err;
4715
4716         err = niu_tx_channel_reset(np, channel);
4717         if (err)
4718                 return err;
4719
4720         err = niu_tx_channel_lpage_init(np, channel);
4721         if (err)
4722                 return err;
4723
4724         nw64(TXC_DMA_MAX(channel), rp->max_burst);
4725         nw64(TX_ENT_MSK(channel), 0);
4726
4727         if (rp->descr_dma & ~(TX_RNG_CFIG_STADDR_BASE |
4728                               TX_RNG_CFIG_STADDR)) {
4729                 dev_err(np->device, PFX "%s: TX ring channel %d "
4730                         "DMA addr (%llx) is not aligned.\n",
4731                         np->dev->name, channel,
4732                         (unsigned long long) rp->descr_dma);
4733                 return -EINVAL;
4734         }
4735
4736         /* The length field in TX_RNG_CFIG is measured in 64-byte
4737          * blocks.  rp->pending is the number of TX descriptors in
4738          * our ring, 8 bytes each, thus we divide by 8 bytes more
4739          * to get the proper value the chip wants.
4740          */
4741         ring_len = (rp->pending / 8);
4742
4743         val = ((ring_len << TX_RNG_CFIG_LEN_SHIFT) |
4744                rp->descr_dma);
4745         nw64(TX_RNG_CFIG(channel), val);
4746
4747         if (((rp->mbox_dma >> 32) & ~TXDMA_MBH_MBADDR) ||
4748             ((u32)rp->mbox_dma & ~TXDMA_MBL_MBADDR)) {
4749                 dev_err(np->device, PFX "%s: TX ring channel %d "
4750                         "MBOX addr (%llx) is has illegal bits.\n",
4751                         np->dev->name, channel,
4752                         (unsigned long long) rp->mbox_dma);
4753                 return -EINVAL;
4754         }
4755         nw64(TXDMA_MBH(channel), rp->mbox_dma >> 32);
4756         nw64(TXDMA_MBL(channel), rp->mbox_dma & TXDMA_MBL_MBADDR);
4757
4758         nw64(TX_CS(channel), 0);
4759
4760         rp->last_pkt_cnt = 0;
4761
4762         return 0;
4763 }
4764
4765 static void niu_init_rdc_groups(struct niu *np)
4766 {
4767         struct niu_rdc_tables *tp = &np->parent->rdc_group_cfg[np->port];
4768         int i, first_table_num = tp->first_table_num;
4769
4770         for (i = 0; i < tp->num_tables; i++) {
4771                 struct rdc_table *tbl = &tp->tables[i];
4772                 int this_table = first_table_num + i;
4773                 int slot;
4774
4775                 for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++)
4776                         nw64(RDC_TBL(this_table, slot),
4777                              tbl->rxdma_channel[slot]);
4778         }
4779
4780         nw64(DEF_RDC(np->port), np->parent->rdc_default[np->port]);
4781 }
4782
4783 static void niu_init_drr_weight(struct niu *np)
4784 {
4785         int type = phy_decode(np->parent->port_phy, np->port);
4786         u64 val;
4787
4788         switch (type) {
4789         case PORT_TYPE_10G:
4790                 val = PT_DRR_WEIGHT_DEFAULT_10G;
4791                 break;
4792
4793         case PORT_TYPE_1G:
4794         default:
4795                 val = PT_DRR_WEIGHT_DEFAULT_1G;
4796                 break;
4797         }
4798         nw64(PT_DRR_WT(np->port), val);
4799 }
4800
4801 static int niu_init_hostinfo(struct niu *np)
4802 {
4803         struct niu_parent *parent = np->parent;
4804         struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
4805         int i, err, num_alt = niu_num_alt_addr(np);
4806         int first_rdc_table = tp->first_table_num;
4807
4808         err = niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
4809         if (err)
4810                 return err;
4811
4812         err = niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
4813         if (err)
4814                 return err;
4815
4816         for (i = 0; i < num_alt; i++) {
4817                 err = niu_set_alt_mac_rdc_table(np, i, first_rdc_table, 1);
4818                 if (err)
4819                         return err;
4820         }
4821
4822         return 0;
4823 }
4824
4825 static int niu_rx_channel_reset(struct niu *np, int channel)
4826 {
4827         return niu_set_and_wait_clear(np, RXDMA_CFIG1(channel),
4828                                       RXDMA_CFIG1_RST, 1000, 10,
4829                                       "RXDMA_CFIG1");
4830 }
4831
/* Initialize RX logical pages for @channel: clear all mask, value,
 * relocation and handle registers, then mark both logical pages
 * valid for this port's function number (mirrors the TX variant).
 */
static int niu_rx_channel_lpage_init(struct niu *np, int channel)
{
	u64 val;

	nw64(RX_LOG_MASK1(channel), 0);
	nw64(RX_LOG_VAL1(channel), 0);
	nw64(RX_LOG_MASK2(channel), 0);
	nw64(RX_LOG_VAL2(channel), 0);
	nw64(RX_LOG_PAGE_RELO1(channel), 0);
	nw64(RX_LOG_PAGE_RELO2(channel), 0);
	nw64(RX_LOG_PAGE_HDL(channel), 0);

	/* Associate both pages with this port's function number. */
	val  = (u64)np->port << RX_LOG_PAGE_VLD_FUNC_SHIFT;
	val |= (RX_LOG_PAGE_VLD_PAGE0 | RX_LOG_PAGE_VLD_PAGE1);
	nw64(RX_LOG_PAGE_VLD(channel), val);

	return 0;
}
4850
4851 static void niu_rx_channel_wred_init(struct niu *np, struct rx_ring_info *rp)
4852 {
4853         u64 val;
4854
4855         val = (((u64)rp->nonsyn_window << RDC_RED_PARA_WIN_SHIFT) |
4856                ((u64)rp->nonsyn_threshold << RDC_RED_PARA_THRE_SHIFT) |
4857                ((u64)rp->syn_window << RDC_RED_PARA_WIN_SYN_SHIFT) |
4858                ((u64)rp->syn_threshold << RDC_RED_PARA_THRE_SYN_SHIFT));
4859         nw64(RDC_RED_PARA(rp->rx_channel), val);
4860 }
4861
4862 static int niu_compute_rbr_cfig_b(struct rx_ring_info *rp, u64 *ret)
4863 {
4864         u64 val = 0;
4865
4866         *ret = 0;
4867         switch (rp->rbr_block_size) {
4868         case 4 * 1024:
4869                 val |= (RBR_BLKSIZE_4K << RBR_CFIG_B_BLKSIZE_SHIFT);
4870                 break;
4871         case 8 * 1024:
4872                 val |= (RBR_BLKSIZE_8K << RBR_CFIG_B_BLKSIZE_SHIFT);
4873                 break;
4874         case 16 * 1024:
4875                 val |= (RBR_BLKSIZE_16K << RBR_CFIG_B_BLKSIZE_SHIFT);
4876                 break;
4877         case 32 * 1024:
4878                 val |= (RBR_BLKSIZE_32K << RBR_CFIG_B_BLKSIZE_SHIFT);
4879                 break;
4880         default:
4881                 return -EINVAL;
4882         }
4883         val |= RBR_CFIG_B_VLD2;
4884         switch (rp->rbr_sizes[2]) {
4885         case 2 * 1024:
4886                 val |= (RBR_BUFSZ2_2K << RBR_CFIG_B_BUFSZ2_SHIFT);
4887                 break;
4888         case 4 * 1024:
4889                 val |= (RBR_BUFSZ2_4K << RBR_CFIG_B_BUFSZ2_SHIFT);
4890                 break;
4891         case 8 * 1024:
4892                 val |= (RBR_BUFSZ2_8K << RBR_CFIG_B_BUFSZ2_SHIFT);
4893                 break;
4894         case 16 * 1024:
4895                 val |= (RBR_BUFSZ2_16K << RBR_CFIG_B_BUFSZ2_SHIFT);
4896                 break;
4897
4898         default:
4899                 return -EINVAL;
4900         }
4901         val |= RBR_CFIG_B_VLD1;
4902         switch (rp->rbr_sizes[1]) {
4903         case 1 * 1024:
4904                 val |= (RBR_BUFSZ1_1K << RBR_CFIG_B_BUFSZ1_SHIFT);
4905                 break;
4906         case 2 * 1024:
4907                 val |= (RBR_BUFSZ1_2K << RBR_CFIG_B_BUFSZ1_SHIFT);
4908                 break;
4909         case 4 * 1024:
4910                 val |= (RBR_BUFSZ1_4K << RBR_CFIG_B_BUFSZ1_SHIFT);
4911                 break;
4912         case 8 * 1024:
4913                 val |= (RBR_BUFSZ1_8K << RBR_CFIG_B_BUFSZ1_SHIFT);
4914                 break;
4915
4916         default:
4917                 return -EINVAL;
4918         }
4919         val |= RBR_CFIG_B_VLD0;
4920         switch (rp->rbr_sizes[0]) {
4921         case 256:
4922                 val |= (RBR_BUFSZ0_256 << RBR_CFIG_B_BUFSZ0_SHIFT);
4923                 break;
4924         case 512:
4925                 val |= (RBR_BUFSZ0_512 << RBR_CFIG_B_BUFSZ0_SHIFT);
4926                 break;
4927         case 1 * 1024:
4928                 val |= (RBR_BUFSZ0_1K << RBR_CFIG_B_BUFSZ0_SHIFT);
4929                 break;
4930         case 2 * 1024:
4931                 val |= (RBR_BUFSZ0_2K << RBR_CFIG_B_BUFSZ0_SHIFT);
4932                 break;
4933
4934         default:
4935                 return -EINVAL;
4936         }
4937
4938         *ret = val;
4939         return 0;
4940 }
4941
4942 static int niu_enable_rx_channel(struct niu *np, int channel, int on)
4943 {
4944         u64 val = nr64(RXDMA_CFIG1(channel));
4945         int limit;
4946
4947         if (on)
4948                 val |= RXDMA_CFIG1_EN;
4949         else
4950                 val &= ~RXDMA_CFIG1_EN;
4951         nw64(RXDMA_CFIG1(channel), val);
4952
4953         limit = 1000;
4954         while (--limit > 0) {
4955                 if (nr64(RXDMA_CFIG1(channel)) & RXDMA_CFIG1_QST)
4956                         break;
4957                 udelay(10);
4958         }
4959         if (limit <= 0)
4960                 return -ENODEV;
4961         return 0;
4962 }
4963
/* Bring up one RX DMA channel: reset it, clear logical page
 * translation, program WRED, the event mask, mailbox, RBR and RCR
 * ring registers, enable the channel, then kick the RBR with the
 * buffers already posted.
 */
static int niu_init_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
{
	int err, channel = rp->rx_channel;
	u64 val;

	err = niu_rx_channel_reset(np, channel);
	if (err)
		return err;

	err = niu_rx_channel_lpage_init(np, channel);
	if (err)
		return err;

	niu_rx_channel_wred_init(np, rp);

	/* NOTE(review): only the RBR_EMPTY bit is set in the event
	 * mask here -- confirm mask polarity against the Neptune docs.
	 */
	nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_RBR_EMPTY);
	nw64(RX_DMA_CTL_STAT(channel),
	     (RX_DMA_CTL_STAT_MEX |
	      RX_DMA_CTL_STAT_RCRTHRES |
	      RX_DMA_CTL_STAT_RCRTO |
	      RX_DMA_CTL_STAT_RBR_EMPTY));
	/* Mailbox address split across CFIG1 (high 32) and CFIG2. */
	nw64(RXDMA_CFIG1(channel), rp->mbox_dma >> 32);
	nw64(RXDMA_CFIG2(channel), (rp->mbox_dma & 0x00000000ffffffc0));
	/* RBR base/length. */
	nw64(RBR_CFIG_A(channel),
	     ((u64)rp->rbr_table_size << RBR_CFIG_A_LEN_SHIFT) |
	     (rp->rbr_dma & (RBR_CFIG_A_STADDR_BASE | RBR_CFIG_A_STADDR)));
	err = niu_compute_rbr_cfig_b(rp, &val);
	if (err)
		return err;
	nw64(RBR_CFIG_B(channel), val);
	/* RCR base/length and interrupt pacing. */
	nw64(RCRCFIG_A(channel),
	     ((u64)rp->rcr_table_size << RCRCFIG_A_LEN_SHIFT) |
	     (rp->rcr_dma & (RCRCFIG_A_STADDR_BASE | RCRCFIG_A_STADDR)));
	nw64(RCRCFIG_B(channel),
	     ((u64)rp->rcr_pkt_threshold << RCRCFIG_B_PTHRES_SHIFT) |
	     RCRCFIG_B_ENTOUT |
	     ((u64)rp->rcr_timeout << RCRCFIG_B_TIMEOUT_SHIFT));

	err = niu_enable_rx_channel(np, channel, 1);
	if (err)
		return err;

	/* Tell the chip how many RBR entries were pre-filled. */
	nw64(RBR_KICK(channel), rp->rbr_index);

	val = nr64(RX_DMA_CTL_STAT(channel));
	val |= RX_DMA_CTL_STAT_RBR_EMPTY;
	nw64(RX_DMA_CTL_STAT(channel), val);

	return 0;
}
5014
5015 static int niu_init_rx_channels(struct niu *np)
5016 {
5017         unsigned long flags;
5018         u64 seed = jiffies_64;
5019         int err, i;
5020
5021         niu_lock_parent(np, flags);
5022         nw64(RX_DMA_CK_DIV, np->parent->rxdma_clock_divider);
5023         nw64(RED_RAN_INIT, RED_RAN_INIT_OPMODE | (seed & RED_RAN_INIT_VAL));
5024         niu_unlock_parent(np, flags);
5025
5026         /* XXX RXDMA 32bit mode? XXX */
5027
5028         niu_init_rdc_groups(np);
5029         niu_init_drr_weight(np);
5030
5031         err = niu_init_hostinfo(np);
5032         if (err)
5033                 return err;
5034
5035         for (i = 0; i < np->num_rx_rings; i++) {
5036                 struct rx_ring_info *rp = &np->rx_rings[i];
5037
5038                 err = niu_init_one_rx_channel(np, rp);
5039                 if (err)
5040                         return err;
5041         }
5042
5043         return 0;
5044 }
5045
5046 static int niu_set_ip_frag_rule(struct niu *np)
5047 {
5048         struct niu_parent *parent = np->parent;
5049         struct niu_classifier *cp = &np->clas;
5050         struct niu_tcam_entry *tp;
5051         int index, err;
5052
5053         index = cp->tcam_top;
5054         tp = &parent->tcam[index];
5055
5056         /* Note that the noport bit is the same in both ipv4 and
5057          * ipv6 format TCAM entries.
5058          */
5059         memset(tp, 0, sizeof(*tp));
5060         tp->key[1] = TCAM_V4KEY1_NOPORT;
5061         tp->key_mask[1] = TCAM_V4KEY1_NOPORT;
5062         tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET |
5063                           ((u64)0 << TCAM_ASSOCDATA_OFFSET_SHIFT));
5064         err = tcam_write(np, index, tp->key, tp->key_mask);
5065         if (err)
5066                 return err;
5067         err = tcam_assoc_write(np, index, tp->assoc_data);
5068         if (err)
5069                 return err;
5070         tp->valid = 1;
5071         cp->tcam_valid_entries++;
5072
5073         return 0;
5074 }
5075
5076 static int niu_init_classifier_hw(struct niu *np)
5077 {
5078         struct niu_parent *parent = np->parent;
5079         struct niu_classifier *cp = &np->clas;
5080         int i, err;
5081
5082         nw64(H1POLY, cp->h1_init);
5083         nw64(H2POLY, cp->h2_init);
5084
5085         err = niu_init_hostinfo(np);
5086         if (err)
5087                 return err;
5088
5089         for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++) {
5090                 struct niu_vlan_rdc *vp = &cp->vlan_mappings[i];
5091
5092                 vlan_tbl_write(np, i, np->port,
5093                                vp->vlan_pref, vp->rdc_num);
5094         }
5095
5096         for (i = 0; i < cp->num_alt_mac_mappings; i++) {
5097                 struct niu_altmac_rdc *ap = &cp->alt_mac_mappings[i];
5098
5099                 err = niu_set_alt_mac_rdc_table(np, ap->alt_mac_num,
5100                                                 ap->rdc_num, ap->mac_pref);
5101                 if (err)
5102                         return err;
5103         }
5104
5105         for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) {
5106                 int index = i - CLASS_CODE_USER_PROG1;
5107
5108                 err = niu_set_tcam_key(np, i, parent->tcam_key[index]);
5109                 if (err)
5110                         return err;
5111                 err = niu_set_flow_key(np, i, parent->flow_key[index]);
5112                 if (err)
5113                         return err;
5114         }
5115
5116         err = niu_set_ip_frag_rule(np);
5117         if (err)
5118                 return err;
5119
5120         tcam_enable(np, 1);
5121
5122         return 0;
5123 }
5124
/* Write one 5-word entry into this port's ZCP CFIFO RAM.  The data
 * words and byte enables are staged first; writing ZCP_RAM_ACC then
 * triggers the access.  Returns 0 or the error from the busy-bit
 * poll.
 *
 * NOTE(review): @index is not referenced -- presumably the RAM
 * pointer auto-increments between accesses; confirm against the
 * Neptune documentation.
 */
static int niu_zcp_write(struct niu *np, int index, u64 *data)
{
	nw64(ZCP_RAM_DATA0, data[0]);
	nw64(ZCP_RAM_DATA1, data[1]);
	nw64(ZCP_RAM_DATA2, data[2]);
	nw64(ZCP_RAM_DATA3, data[3]);
	nw64(ZCP_RAM_DATA4, data[4]);
	nw64(ZCP_RAM_BE, ZCP_RAM_BE_VAL);
	nw64(ZCP_RAM_ACC,
	     (ZCP_RAM_ACC_WRITE |
	      (0 << ZCP_RAM_ACC_ZFCID_SHIFT) |
	      (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));

	return niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
				   1000, 100);
}
5141
/* Read one 5-word entry from this port's ZCP CFIFO RAM: wait for the
 * access register to go idle, trigger a read, wait again, then latch
 * the five data registers into @data.
 *
 * NOTE(review): @index is not referenced here either -- see the note
 * on niu_zcp_write().
 */
static int niu_zcp_read(struct niu *np, int index, u64 *data)
{
	int err;

	err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
				  1000, 100);
	if (err) {
		dev_err(np->device, PFX "%s: ZCP read busy won't clear, "
			"ZCP_RAM_ACC[%llx]\n", np->dev->name,
			(unsigned long long) nr64(ZCP_RAM_ACC));
		return err;
	}

	nw64(ZCP_RAM_ACC,
	     (ZCP_RAM_ACC_READ |
	      (0 << ZCP_RAM_ACC_ZFCID_SHIFT) |
	      (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));

	err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
				  1000, 100);
	if (err) {
		dev_err(np->device, PFX "%s: ZCP read busy2 won't clear, "
			"ZCP_RAM_ACC[%llx]\n", np->dev->name,
			(unsigned long long) nr64(ZCP_RAM_ACC));
		return err;
	}

	data[0] = nr64(ZCP_RAM_DATA0);
	data[1] = nr64(ZCP_RAM_DATA1);
	data[2] = nr64(ZCP_RAM_DATA2);
	data[3] = nr64(ZCP_RAM_DATA3);
	data[4] = nr64(ZCP_RAM_DATA4);

	return 0;
}
5177
5178 static void niu_zcp_cfifo_reset(struct niu *np)
5179 {
5180         u64 val = nr64(RESET_CFIFO);
5181
5182         val |= RESET_CFIFO_RST(np->port);
5183         nw64(RESET_CFIFO, val);
5184         udelay(10);
5185
5186         val &= ~RESET_CFIFO_RST(np->port);
5187         nw64(RESET_CFIFO, val);
5188 }
5189
5190 static int niu_init_zcp(struct niu *np)
5191 {
5192         u64 data[5], rbuf[5];
5193         int i, max, err;
5194
5195         if (np->parent->plat_type != PLAT_TYPE_NIU) {
5196                 if (np->port == 0 || np->port == 1)
5197                         max = ATLAS_P0_P1_CFIFO_ENTRIES;
5198                 else
5199                         max = ATLAS_P2_P3_CFIFO_ENTRIES;
5200         } else
5201                 max = NIU_CFIFO_ENTRIES;
5202
5203         data[0] = 0;
5204         data[1] = 0;
5205         data[2] = 0;
5206         data[3] = 0;
5207         data[4] = 0;
5208
5209         for (i = 0; i < max; i++) {
5210                 err = niu_zcp_write(np, i, data);
5211                 if (err)
5212                         return err;
5213                 err = niu_zcp_read(np, i, rbuf);
5214                 if (err)
5215                         return err;
5216         }
5217
5218         niu_zcp_cfifo_reset(np);
5219         nw64(CFIFO_ECC(np->port), 0);
5220         nw64(ZCP_INT_STAT, ZCP_INT_STAT_ALL);
5221         (void) nr64(ZCP_INT_STAT);
5222         nw64(ZCP_INT_MASK, ZCP_INT_MASK_ALL);
5223
5224         return 0;
5225 }
5226
/* Write one 5-word entry at @index into the IPP DFIFO.  DFIFO PIO
 * write mode is enabled for the duration of the access and the
 * previous IPP_CFIG value is restored afterwards.
 */
static void niu_ipp_write(struct niu *np, int index, u64 *data)
{
	u64 val = nr64_ipp(IPP_CFIG);

	nw64_ipp(IPP_CFIG, val | IPP_CFIG_DFIFO_PIO_W);
	nw64_ipp(IPP_DFIFO_WR_PTR, index);
	nw64_ipp(IPP_DFIFO_WR0, data[0]);
	nw64_ipp(IPP_DFIFO_WR1, data[1]);
	nw64_ipp(IPP_DFIFO_WR2, data[2]);
	nw64_ipp(IPP_DFIFO_WR3, data[3]);
	nw64_ipp(IPP_DFIFO_WR4, data[4]);
	nw64_ipp(IPP_CFIG, val & ~IPP_CFIG_DFIFO_PIO_W);
}
5240
5241 static void niu_ipp_read(struct niu *np, int index, u64 *data)
5242 {
5243         nw64_ipp(IPP_DFIFO_RD_PTR, index);
5244         data[0] = nr64_ipp(IPP_DFIFO_RD0);
5245         data[1] = nr64_ipp(IPP_DFIFO_RD1);
5246         data[2] = nr64_ipp(IPP_DFIFO_RD2);
5247         data[3] = nr64_ipp(IPP_DFIFO_RD3);
5248         data[4] = nr64_ipp(IPP_DFIFO_RD4);
5249 }
5250
5251 static int niu_ipp_reset(struct niu *np)
5252 {
5253         return niu_set_and_wait_clear_ipp(np, IPP_CFIG, IPP_CFIG_SOFT_RST,
5254                                           1000, 100, "IPP_CFIG");
5255 }
5256
5257 static int niu_init_ipp(struct niu *np)
5258 {
5259         u64 data[5], rbuf[5], val;
5260         int i, max, err;
5261
5262         if (np->parent->plat_type != PLAT_TYPE_NIU) {
5263                 if (np->port == 0 || np->port == 1)
5264                         max = ATLAS_P0_P1_DFIFO_ENTRIES;
5265                 else
5266                         max = ATLAS_P2_P3_DFIFO_ENTRIES;
5267         } else
5268                 max = NIU_DFIFO_ENTRIES;
5269
5270         data[0] = 0;
5271         data[1] = 0;
5272         data[2] = 0;
5273         data[3] = 0;
5274         data[4] = 0;
5275
5276         for (i = 0; i < max; i++) {
5277                 niu_ipp_write(np, i, data);
5278                 niu_ipp_read(np, i, rbuf);
5279         }
5280
5281         (void) nr64_ipp(IPP_INT_STAT);
5282         (void) nr64_ipp(IPP_INT_STAT);
5283
5284         err = niu_ipp_reset(np);
5285         if (err)
5286                 return err;
5287
5288         (void) nr64_ipp(IPP_PKT_DIS);
5289         (void) nr64_ipp(IPP_BAD_CS_CNT);
5290         (void) nr64_ipp(IPP_ECC);
5291
5292         (void) nr64_ipp(IPP_INT_STAT);
5293
5294         nw64_ipp(IPP_MSK, ~IPP_MSK_ALL);
5295
5296         val = nr64_ipp(IPP_CFIG);
5297         val &= ~IPP_CFIG_IP_MAX_PKT;
5298         val |= (IPP_CFIG_IPP_ENABLE |
5299                 IPP_CFIG_DFIFO_ECC_EN |
5300                 IPP_CFIG_DROP_BAD_CRC |
5301                 IPP_CFIG_CKSUM_EN |
5302                 (0x1ffff << IPP_CFIG_IP_MAX_PKT_SHIFT));
5303         nw64_ipp(IPP_CFIG, val);
5304
5305         return 0;
5306 }
5307
5308 static void niu_handle_led(struct niu *np, int status)
5309 {
5310         u64 val;
5311         val = nr64_mac(XMAC_CONFIG);
5312
5313         if ((np->flags & NIU_FLAGS_10G) != 0 &&
5314             (np->flags & NIU_FLAGS_FIBER) != 0) {
5315                 if (status) {
5316                         val |= XMAC_CONFIG_LED_POLARITY;
5317                         val &= ~XMAC_CONFIG_FORCE_LED_ON;
5318                 } else {
5319                         val |= XMAC_CONFIG_FORCE_LED_ON;
5320                         val &= ~XMAC_CONFIG_LED_POLARITY;
5321                 }
5322         }
5323
5324         nw64_mac(XMAC_CONFIG, val);
5325 }
5326
/* Program the XMAC XIF configuration from the current link config:
 * clock source, loopback, link fault signalling, PCS bypass and the
 * MII/GMII/XGMII operating mode.
 */
static void niu_init_xif_xmac(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u64 val;

	/* NOTE(review): presumably needed on ATCA boards with a
	 * SERDES transceiver (the flag name suggests so) -- confirm.
	 */
	if (np->flags & NIU_FLAGS_XCVR_SERDES) {
		val = nr64(MIF_CONFIG);
		val |= MIF_CONFIG_ATCA_GE;
		nw64(MIF_CONFIG, val);
	}

	val = nr64_mac(XMAC_CONFIG);
	val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;

	val |= XMAC_CONFIG_TX_OUTPUT_EN;

	if (lp->loopback_mode == LOOPBACK_MAC) {
		val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;
		val |= XMAC_CONFIG_LOOPBACK;
	} else {
		val &= ~XMAC_CONFIG_LOOPBACK;
	}

	if (np->flags & NIU_FLAGS_10G) {
		val &= ~XMAC_CONFIG_LFS_DISABLE;
	} else {
		val |= XMAC_CONFIG_LFS_DISABLE;
		/* 1G copper (neither fiber nor serdes) bypasses the
		 * 1G PCS; fiber/serdes ports use it.
		 */
		if (!(np->flags & NIU_FLAGS_FIBER) &&
		    !(np->flags & NIU_FLAGS_XCVR_SERDES))
			val |= XMAC_CONFIG_1G_PCS_BYPASS;
		else
			val &= ~XMAC_CONFIG_1G_PCS_BYPASS;
	}

	val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;

	/* 100Mbit operation selects the 25MHz clock. */
	if (lp->active_speed == SPEED_100)
		val |= XMAC_CONFIG_SEL_CLK_25MHZ;
	else
		val &= ~XMAC_CONFIG_SEL_CLK_25MHZ;

	nw64_mac(XMAC_CONFIG, val);

	/* Second pass: select XGMII for 10G, otherwise GMII/MII by
	 * the active speed.
	 */
	val = nr64_mac(XMAC_CONFIG);
	val &= ~XMAC_CONFIG_MODE_MASK;
	if (np->flags & NIU_FLAGS_10G) {
		val |= XMAC_CONFIG_MODE_XGMII;
	} else {
		if (lp->active_speed == SPEED_1000)
			val |= XMAC_CONFIG_MODE_GMII;
		else
			val |= XMAC_CONFIG_MODE_MII;
	}

	nw64_mac(XMAC_CONFIG, val);
}
5383
5384 static void niu_init_xif_bmac(struct niu *np)
5385 {
5386         struct niu_link_config *lp = &np->link_config;
5387         u64 val;
5388
5389         val = BMAC_XIF_CONFIG_TX_OUTPUT_EN;
5390
5391         if (lp->loopback_mode == LOOPBACK_MAC)
5392                 val |= BMAC_XIF_CONFIG_MII_LOOPBACK;
5393         else
5394                 val &= ~BMAC_XIF_CONFIG_MII_LOOPBACK;
5395
5396         if (lp->active_speed == SPEED_1000)
5397                 val |= BMAC_XIF_CONFIG_GMII_MODE;
5398         else
5399                 val &= ~BMAC_XIF_CONFIG_GMII_MODE;
5400
5401         val &= ~(BMAC_XIF_CONFIG_LINK_LED |
5402                  BMAC_XIF_CONFIG_LED_POLARITY);
5403
5404         if (!(np->flags & NIU_FLAGS_10G) &&
5405             !(np->flags & NIU_FLAGS_FIBER) &&
5406             lp->active_speed == SPEED_100)
5407                 val |= BMAC_XIF_CONFIG_25MHZ_CLOCK;
5408         else
5409                 val &= ~BMAC_XIF_CONFIG_25MHZ_CLOCK;
5410
5411         nw64_mac(BMAC_XIF_CONFIG, val);
5412 }
5413
5414 static void niu_init_xif(struct niu *np)
5415 {
5416         if (np->flags & NIU_FLAGS_XMAC)
5417                 niu_init_xif_xmac(np);
5418         else
5419                 niu_init_xif_bmac(np);
5420 }
5421
5422 static void niu_pcs_mii_reset(struct niu *np)
5423 {
5424         int limit = 1000;
5425         u64 val = nr64_pcs(PCS_MII_CTL);
5426         val |= PCS_MII_CTL_RST;
5427         nw64_pcs(PCS_MII_CTL, val);
5428         while ((--limit >= 0) && (val & PCS_MII_CTL_RST)) {
5429                 udelay(100);
5430                 val = nr64_pcs(PCS_MII_CTL);
5431         }
5432 }
5433
5434 static void niu_xpcs_reset(struct niu *np)
5435 {
5436         int limit = 1000;
5437         u64 val = nr64_xpcs(XPCS_CONTROL1);
5438         val |= XPCS_CONTROL1_RESET;
5439         nw64_xpcs(XPCS_CONTROL1, val);
5440         while ((--limit >= 0) && (val & XPCS_CONTROL1_RESET)) {
5441                 udelay(100);
5442                 val = nr64_xpcs(XPCS_CONTROL1);
5443         }
5444 }
5445
/* Configure the PCS/XPCS layer according to the port type encoded in
 * np->flags (10G/fiber/SERDES combination).  Returns 0 on success or
 * -EINVAL for unsupported combinations (e.g. 10G on a non-XMAC port).
 */
static int niu_init_pcs(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u64 val;

	switch (np->flags & (NIU_FLAGS_10G |
			     NIU_FLAGS_FIBER |
			     NIU_FLAGS_XCVR_SERDES)) {
	case NIU_FLAGS_FIBER:
		/* 1G fiber */
		nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
		nw64_pcs(PCS_DPATH_MODE, 0);
		niu_pcs_mii_reset(np);
		break;

	case NIU_FLAGS_10G:
	case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
	case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
		/* 10G SERDES */
		if (!(np->flags & NIU_FLAGS_XMAC))
			return -EINVAL;

		/* 10G copper or fiber */
		val = nr64_mac(XMAC_CONFIG);
		val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;
		nw64_mac(XMAC_CONFIG, val);

		niu_xpcs_reset(np);

		/* Optional PHY-level loopback for diagnostics. */
		val = nr64_xpcs(XPCS_CONTROL1);
		if (lp->loopback_mode == LOOPBACK_PHY)
			val |= XPCS_CONTROL1_LOOPBACK;
		else
			val &= ~XPCS_CONTROL1_LOOPBACK;
		nw64_xpcs(XPCS_CONTROL1, val);

		/* Clear the deskew counter; the symbol error counters
		 * are read to discard any stale counts.
		 */
		nw64_xpcs(XPCS_DESKEW_ERR_CNT, 0);
		(void) nr64_xpcs(XPCS_SYMERR_CNT01);
		(void) nr64_xpcs(XPCS_SYMERR_CNT23);
		break;


	case NIU_FLAGS_XCVR_SERDES:
		/* 1G SERDES */
		niu_pcs_mii_reset(np);
		nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
		nw64_pcs(PCS_DPATH_MODE, 0);
		break;

	case 0:
		/* 1G copper */
	case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
		/* 1G RGMII FIBER */
		nw64_pcs(PCS_DPATH_MODE, PCS_DPATH_MODE_MII);
		niu_pcs_mii_reset(np);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
5509
5510 static int niu_reset_tx_xmac(struct niu *np)
5511 {
5512         return niu_set_and_wait_clear_mac(np, XTXMAC_SW_RST,
5513                                           (XTXMAC_SW_RST_REG_RS |
5514                                            XTXMAC_SW_RST_SOFT_RST),
5515                                           1000, 100, "XTXMAC_SW_RST");
5516 }
5517
5518 static int niu_reset_tx_bmac(struct niu *np)
5519 {
5520         int limit;
5521
5522         nw64_mac(BTXMAC_SW_RST, BTXMAC_SW_RST_RESET);
5523         limit = 1000;
5524         while (--limit >= 0) {
5525                 if (!(nr64_mac(BTXMAC_SW_RST) & BTXMAC_SW_RST_RESET))
5526                         break;
5527                 udelay(100);
5528         }
5529         if (limit < 0) {
5530                 dev_err(np->device, PFX "Port %u TX BMAC would not reset, "
5531                         "BTXMAC_SW_RST[%llx]\n",
5532                         np->port,
5533                         (unsigned long long) nr64_mac(BTXMAC_SW_RST));
5534                 return -ENODEV;
5535         }
5536
5537         return 0;
5538 }
5539
5540 static int niu_reset_tx_mac(struct niu *np)
5541 {
5542         if (np->flags & NIU_FLAGS_XMAC)
5543                 return niu_reset_tx_xmac(np);
5544         else
5545                 return niu_reset_tx_bmac(np);
5546 }
5547
/* Program the XMAC transmit side: min/max frame sizes (@min/@max, in
 * bytes), inter-packet gap, and a cleared TX config.  TX stays disabled
 * here and is turned on later by niu_enable_tx_mac().
 */
static void niu_init_tx_xmac(struct niu *np, u64 min, u64 max)
{
	u64 val;

	/* Both the RX and TX minimum packet sizes live in XMAC_MIN. */
	val = nr64_mac(XMAC_MIN);
	val &= ~(XMAC_MIN_TX_MIN_PKT_SIZE |
		 XMAC_MIN_RX_MIN_PKT_SIZE);
	val |= (min << XMAC_MIN_RX_MIN_PKT_SIZE_SHFT);
	val |= (min << XMAC_MIN_TX_MIN_PKT_SIZE_SHFT);
	nw64_mac(XMAC_MIN, val);

	nw64_mac(XMAC_MAX, max);

	/* Mask all TX MAC status interrupts. */
	nw64_mac(XTXMAC_STAT_MSK, ~(u64)0);

	/* Inter-packet gap depends on the interface mode. */
	val = nr64_mac(XMAC_IPG);
	if (np->flags & NIU_FLAGS_10G) {
		val &= ~XMAC_IPG_IPG_XGMII;
		val |= (IPG_12_15_XGMII << XMAC_IPG_IPG_XGMII_SHIFT);
	} else {
		val &= ~XMAC_IPG_IPG_MII_GMII;
		val |= (IPG_12_MII_GMII << XMAC_IPG_IPG_MII_GMII_SHIFT);
	}
	nw64_mac(XMAC_IPG, val);

	/* Clear TX enable along with the CRC/stretch/IPG options. */
	val = nr64_mac(XMAC_CONFIG);
	val &= ~(XMAC_CONFIG_ALWAYS_NO_CRC |
		 XMAC_CONFIG_STRETCH_MODE |
		 XMAC_CONFIG_VAR_MIN_IPG_EN |
		 XMAC_CONFIG_TX_ENABLE);
	nw64_mac(XMAC_CONFIG, val);

	/* Zero the TX frame/byte counters. */
	nw64_mac(TXMAC_FRM_CNT, 0);
	nw64_mac(TXMAC_BYTE_CNT, 0);
}
5583
/* Program the BMAC transmit side: frame size limits (@min/@max, in
 * bytes), control frame type, preamble length, and a disabled TX
 * config.  TX is enabled later by niu_enable_tx_mac().
 */
static void niu_init_tx_bmac(struct niu *np, u64 min, u64 max)
{
	u64 val;

	nw64_mac(BMAC_MIN_FRAME, min);
	nw64_mac(BMAC_MAX_FRAME, max);

	/* Mask all TX MAC status interrupts. */
	nw64_mac(BTXMAC_STATUS_MASK, ~(u64)0);
	/* 0x8808 is the IEEE 802.3 MAC control (pause) EtherType. */
	nw64_mac(BMAC_CTRL_TYPE, 0x8808);
	nw64_mac(BMAC_PREAMBLE_SIZE, 7);

	/* Leave FCS generation on and TX disabled for now. */
	val = nr64_mac(BTXMAC_CONFIG);
	val &= ~(BTXMAC_CONFIG_FCS_DISABLE |
		 BTXMAC_CONFIG_ENABLE);
	nw64_mac(BTXMAC_CONFIG, val);
}
5600
5601 static void niu_init_tx_mac(struct niu *np)
5602 {
5603         u64 min, max;
5604
5605         min = 64;
5606         if (np->dev->mtu > ETH_DATA_LEN)
5607                 max = 9216;
5608         else
5609                 max = 1522;
5610
5611         /* The XMAC_MIN register only accepts values for TX min which
5612          * have the low 3 bits cleared.
5613          */
5614         BUG_ON(min & 0x7);
5615
5616         if (np->flags & NIU_FLAGS_XMAC)
5617                 niu_init_tx_xmac(np, min, max);
5618         else
5619                 niu_init_tx_bmac(np, min, max);
5620 }
5621
5622 static int niu_reset_rx_xmac(struct niu *np)
5623 {
5624         int limit;
5625
5626         nw64_mac(XRXMAC_SW_RST,
5627                  XRXMAC_SW_RST_REG_RS | XRXMAC_SW_RST_SOFT_RST);
5628         limit = 1000;
5629         while (--limit >= 0) {
5630                 if (!(nr64_mac(XRXMAC_SW_RST) & (XRXMAC_SW_RST_REG_RS |
5631                                                  XRXMAC_SW_RST_SOFT_RST)))
5632                     break;
5633                 udelay(100);
5634         }
5635         if (limit < 0) {
5636                 dev_err(np->device, PFX "Port %u RX XMAC would not reset, "
5637                         "XRXMAC_SW_RST[%llx]\n",
5638                         np->port,
5639                         (unsigned long long) nr64_mac(XRXMAC_SW_RST));
5640                 return -ENODEV;
5641         }
5642
5643         return 0;
5644 }
5645
5646 static int niu_reset_rx_bmac(struct niu *np)
5647 {
5648         int limit;
5649
5650         nw64_mac(BRXMAC_SW_RST, BRXMAC_SW_RST_RESET);
5651         limit = 1000;
5652         while (--limit >= 0) {
5653                 if (!(nr64_mac(BRXMAC_SW_RST) & BRXMAC_SW_RST_RESET))
5654                         break;
5655                 udelay(100);
5656         }
5657         if (limit < 0) {
5658                 dev_err(np->device, PFX "Port %u RX BMAC would not reset, "
5659                         "BRXMAC_SW_RST[%llx]\n",
5660                         np->port,
5661                         (unsigned long long) nr64_mac(BRXMAC_SW_RST));
5662                 return -ENODEV;
5663         }
5664
5665         return 0;
5666 }
5667
5668 static int niu_reset_rx_mac(struct niu *np)
5669 {
5670         if (np->flags & NIU_FLAGS_XMAC)
5671                 return niu_reset_rx_xmac(np);
5672         else
5673                 return niu_reset_rx_bmac(np);
5674 }
5675
/* Program the XMAC receive side: clear address filters and the
 * multicast hash table, bind the port's RDC group, set a conservative
 * RX config (hash filtering on, RX itself still disabled), and zero
 * the RX statistics counters.
 */
static void niu_init_rx_xmac(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
	int first_rdc_table = tp->first_table_num;
	unsigned long i;
	u64 val;

	/* Clear the alternate address filters and their masks. */
	nw64_mac(XMAC_ADD_FILT0, 0);
	nw64_mac(XMAC_ADD_FILT1, 0);
	nw64_mac(XMAC_ADD_FILT2, 0);
	nw64_mac(XMAC_ADD_FILT12_MASK, 0);
	nw64_mac(XMAC_ADD_FILT00_MASK, 0);
	/* Empty the multicast hash table. */
	for (i = 0; i < MAC_NUM_HASH; i++)
		nw64_mac(XMAC_HASH_TBL(i), 0);
	/* Mask all RX MAC status interrupts. */
	nw64_mac(XRXMAC_STAT_MSK, ~(u64)0);
	/* Point unicast and multicast traffic at this port's RDC group. */
	niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
	niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);

	/* RX stays disabled here; niu_enable_rx_mac() turns it on and
	 * applies the promiscuous/multicast policy bits.
	 */
	val = nr64_mac(XMAC_CONFIG);
	val &= ~(XMAC_CONFIG_RX_MAC_ENABLE |
		 XMAC_CONFIG_PROMISCUOUS |
		 XMAC_CONFIG_PROMISC_GROUP |
		 XMAC_CONFIG_ERR_CHK_DIS |
		 XMAC_CONFIG_RX_CRC_CHK_DIS |
		 XMAC_CONFIG_RESERVED_MULTICAST |
		 XMAC_CONFIG_RX_CODEV_CHK_DIS |
		 XMAC_CONFIG_ADDR_FILTER_EN |
		 XMAC_CONFIG_RCV_PAUSE_ENABLE |
		 XMAC_CONFIG_STRIP_CRC |
		 XMAC_CONFIG_PASS_FLOW_CTRL |
		 XMAC_CONFIG_MAC2IPP_PKT_CNT_EN);
	val |= (XMAC_CONFIG_HASH_FILTER_EN);
	nw64_mac(XMAC_CONFIG, val);

	/* Zero the RX statistics counters. */
	nw64_mac(RXMAC_BT_CNT, 0);
	nw64_mac(RXMAC_BC_FRM_CNT, 0);
	nw64_mac(RXMAC_MC_FRM_CNT, 0);
	nw64_mac(RXMAC_FRAG_CNT, 0);
	nw64_mac(RXMAC_HIST_CNT1, 0);
	nw64_mac(RXMAC_HIST_CNT2, 0);
	nw64_mac(RXMAC_HIST_CNT3, 0);
	nw64_mac(RXMAC_HIST_CNT4, 0);
	nw64_mac(RXMAC_HIST_CNT5, 0);
	nw64_mac(RXMAC_HIST_CNT6, 0);
	nw64_mac(RXMAC_HIST_CNT7, 0);
	nw64_mac(RXMAC_MPSZER_CNT, 0);
	nw64_mac(RXMAC_CRC_ER_CNT, 0);
	nw64_mac(RXMAC_CD_VIO_CNT, 0);
	nw64_mac(LINK_FAULT_CNT, 0);
}
5727
/* Program the BMAC receive side: clear address filters and the
 * multicast hash table, bind the port's RDC group, set a conservative
 * RX config (hash filtering on, RX itself still disabled), and enable
 * compare slot 0 for the primary MAC address.
 */
static void niu_init_rx_bmac(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
	int first_rdc_table = tp->first_table_num;
	unsigned long i;
	u64 val;

	/* Clear the alternate address filters and their masks. */
	nw64_mac(BMAC_ADD_FILT0, 0);
	nw64_mac(BMAC_ADD_FILT1, 0);
	nw64_mac(BMAC_ADD_FILT2, 0);
	nw64_mac(BMAC_ADD_FILT12_MASK, 0);
	nw64_mac(BMAC_ADD_FILT00_MASK, 0);
	/* Empty the multicast hash table. */
	for (i = 0; i < MAC_NUM_HASH; i++)
		nw64_mac(BMAC_HASH_TBL(i), 0);
	/* Point unicast and multicast traffic at this port's RDC group. */
	niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
	niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
	/* Mask all RX MAC status interrupts. */
	nw64_mac(BRXMAC_STATUS_MASK, ~(u64)0);

	/* RX stays disabled here; niu_enable_rx_mac() turns it on and
	 * applies the promiscuous/multicast policy bits.
	 */
	val = nr64_mac(BRXMAC_CONFIG);
	val &= ~(BRXMAC_CONFIG_ENABLE |
		 BRXMAC_CONFIG_STRIP_PAD |
		 BRXMAC_CONFIG_STRIP_FCS |
		 BRXMAC_CONFIG_PROMISC |
		 BRXMAC_CONFIG_PROMISC_GRP |
		 BRXMAC_CONFIG_ADDR_FILT_EN |
		 BRXMAC_CONFIG_DISCARD_DIS);
	val |= (BRXMAC_CONFIG_HASH_FILT_EN);
	nw64_mac(BRXMAC_CONFIG, val);

	/* Enable address compare slot 0 (the primary MAC address). */
	val = nr64_mac(BMAC_ADDR_CMPEN);
	val |= BMAC_ADDR_CMPEN_EN0;
	nw64_mac(BMAC_ADDR_CMPEN, val);
}
5762
5763 static void niu_init_rx_mac(struct niu *np)
5764 {
5765         niu_set_primary_mac(np, np->dev->dev_addr);
5766
5767         if (np->flags & NIU_FLAGS_XMAC)
5768                 niu_init_rx_xmac(np);
5769         else
5770                 niu_init_rx_bmac(np);
5771 }
5772
5773 static void niu_enable_tx_xmac(struct niu *np, int on)
5774 {
5775         u64 val = nr64_mac(XMAC_CONFIG);
5776
5777         if (on)
5778                 val |= XMAC_CONFIG_TX_ENABLE;
5779         else
5780                 val &= ~XMAC_CONFIG_TX_ENABLE;
5781         nw64_mac(XMAC_CONFIG, val);
5782 }
5783
5784 static void niu_enable_tx_bmac(struct niu *np, int on)
5785 {
5786         u64 val = nr64_mac(BTXMAC_CONFIG);
5787
5788         if (on)
5789                 val |= BTXMAC_CONFIG_ENABLE;
5790         else
5791                 val &= ~BTXMAC_CONFIG_ENABLE;
5792         nw64_mac(BTXMAC_CONFIG, val);
5793 }
5794
5795 static void niu_enable_tx_mac(struct niu *np, int on)
5796 {
5797         if (np->flags & NIU_FLAGS_XMAC)
5798                 niu_enable_tx_xmac(np, on);
5799         else
5800                 niu_enable_tx_bmac(np, on);
5801 }
5802
5803 static void niu_enable_rx_xmac(struct niu *np, int on)
5804 {
5805         u64 val = nr64_mac(XMAC_CONFIG);
5806
5807         val &= ~(XMAC_CONFIG_HASH_FILTER_EN |
5808                  XMAC_CONFIG_PROMISCUOUS);
5809
5810         if (np->flags & NIU_FLAGS_MCAST)
5811                 val |= XMAC_CONFIG_HASH_FILTER_EN;
5812         if (np->flags & NIU_FLAGS_PROMISC)
5813                 val |= XMAC_CONFIG_PROMISCUOUS;
5814
5815         if (on)
5816                 val |= XMAC_CONFIG_RX_MAC_ENABLE;
5817         else
5818                 val &= ~XMAC_CONFIG_RX_MAC_ENABLE;
5819         nw64_mac(XMAC_CONFIG, val);
5820 }
5821
5822 static void niu_enable_rx_bmac(struct niu *np, int on)
5823 {
5824         u64 val = nr64_mac(BRXMAC_CONFIG);
5825
5826         val &= ~(BRXMAC_CONFIG_HASH_FILT_EN |
5827                  BRXMAC_CONFIG_PROMISC);
5828
5829         if (np->flags & NIU_FLAGS_MCAST)
5830                 val |= BRXMAC_CONFIG_HASH_FILT_EN;
5831         if (np->flags & NIU_FLAGS_PROMISC)
5832                 val |= BRXMAC_CONFIG_PROMISC;
5833
5834         if (on)
5835                 val |= BRXMAC_CONFIG_ENABLE;
5836         else
5837                 val &= ~BRXMAC_CONFIG_ENABLE;
5838         nw64_mac(BRXMAC_CONFIG, val);
5839 }
5840
5841 static void niu_enable_rx_mac(struct niu *np, int on)
5842 {
5843         if (np->flags & NIU_FLAGS_XMAC)
5844                 niu_enable_rx_xmac(np, on);
5845         else
5846                 niu_enable_rx_bmac(np, on);
5847 }
5848
/* Bring up the whole MAC layer: XIF, PCS, then TX and RX MAC reset and
 * init, finishing with TX/RX enable.  Returns 0 or a negative errno
 * from one of the reset/init steps.  Step order matters -- see the
 * comment below about the double TX init.
 */
static int niu_init_mac(struct niu *np)
{
	int err;

	niu_init_xif(np);
	err = niu_init_pcs(np);
	if (err)
		return err;

	err = niu_reset_tx_mac(np);
	if (err)
		return err;
	niu_init_tx_mac(np);
	err = niu_reset_rx_mac(np);
	if (err)
		return err;
	niu_init_rx_mac(np);

	/* This looks hookey but the RX MAC reset we just did will
	 * undo some of the state we setup in niu_init_tx_mac() so we
	 * have to call it again.  In particular, the RX MAC reset will
	 * set the XMAC_MAX register back to it's default value.
	 */
	niu_init_tx_mac(np);
	niu_enable_tx_mac(np, 1);

	niu_enable_rx_mac(np, 1);

	return 0;
}
5879
5880 static void niu_stop_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
5881 {
5882         (void) niu_tx_channel_stop(np, rp->tx_channel);
5883 }
5884
5885 static void niu_stop_tx_channels(struct niu *np)
5886 {
5887         int i;
5888
5889         for (i = 0; i < np->num_tx_rings; i++) {
5890                 struct tx_ring_info *rp = &np->tx_rings[i];
5891
5892                 niu_stop_one_tx_channel(np, rp);
5893         }
5894 }
5895
5896 static void niu_reset_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
5897 {
5898         (void) niu_tx_channel_reset(np, rp->tx_channel);
5899 }
5900
5901 static void niu_reset_tx_channels(struct niu *np)
5902 {
5903         int i;
5904
5905         for (i = 0; i < np->num_tx_rings; i++) {
5906                 struct tx_ring_info *rp = &np->tx_rings[i];
5907
5908                 niu_reset_one_tx_channel(np, rp);
5909         }
5910 }
5911
5912 static void niu_stop_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
5913 {
5914         (void) niu_enable_rx_channel(np, rp->rx_channel, 0);
5915 }
5916
5917 static void niu_stop_rx_channels(struct niu *np)
5918 {
5919         int i;
5920
5921         for (i = 0; i < np->num_rx_rings; i++) {
5922                 struct rx_ring_info *rp = &np->rx_rings[i];
5923
5924                 niu_stop_one_rx_channel(np, rp);
5925         }
5926 }
5927
5928 static void niu_reset_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
5929 {
5930         int channel = rp->rx_channel;
5931
5932         (void) niu_rx_channel_reset(np, channel);
5933         nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_ALL);
5934         nw64(RX_DMA_CTL_STAT(channel), 0);
5935         (void) niu_enable_rx_channel(np, channel, 0);
5936 }
5937
5938 static void niu_reset_rx_channels(struct niu *np)
5939 {
5940         int i;
5941
5942         for (i = 0; i < np->num_rx_rings; i++) {
5943                 struct rx_ring_info *rp = &np->rx_rings[i];
5944
5945                 niu_reset_one_rx_channel(np, rp);
5946         }
5947 }
5948
/* Quiesce and disable the IPP block: wait (bounded, busy-poll) for the
 * DFIFO read and write pointers to match, clear the IPP enable bits,
 * then reset the block.
 */
static void niu_disable_ipp(struct niu *np)
{
	u64 rd, wr, val;
	int limit;

	rd = nr64_ipp(IPP_DFIFO_RD_PTR);
	wr = nr64_ipp(IPP_DFIFO_WR_PTR);
	limit = 100;
	while (--limit >= 0 && (rd != wr)) {
		rd = nr64_ipp(IPP_DFIFO_RD_PTR);
		wr = nr64_ipp(IPP_DFIFO_WR_PTR);
	}
	/* NOTE(review): 'rd != 0 && wr != 1' looks like it may have been
	 * intended as '!(rd == 0 && wr == 1)'; as written, the warning
	 * is suppressed whenever either pointer lands on 0/1 -- confirm.
	 */
	if (limit < 0 &&
	    (rd != 0 && wr != 1)) {
		dev_err(np->device, PFX "%s: IPP would not quiesce, "
			"rd_ptr[%llx] wr_ptr[%llx]\n",
			np->dev->name,
			(unsigned long long) nr64_ipp(IPP_DFIFO_RD_PTR),
			(unsigned long long) nr64_ipp(IPP_DFIFO_WR_PTR));
	}

	val = nr64_ipp(IPP_CFIG);
	val &= ~(IPP_CFIG_IPP_ENABLE |
		 IPP_CFIG_DFIFO_ECC_EN |
		 IPP_CFIG_DROP_BAD_CRC |
		 IPP_CFIG_CKSUM_EN);
	nw64_ipp(IPP_CFIG, val);

	/* Best-effort reset; failure is ignored on the teardown path. */
	(void) niu_ipp_reset(np);
}
5979
/* Bring the whole datapath online: TXC, TX channels, RX channels,
 * classifier, ZCP, IPP and finally the MAC.  Returns 0 on success; on
 * failure, unwinds in reverse order via the fall-through goto cascade
 * below (each label intentionally falls into the next).
 */
static int niu_init_hw(struct niu *np)
{
	int i, err;

	niudbg(IFUP, "%s: Initialize TXC\n", np->dev->name);
	niu_txc_enable_port(np, 1);
	niu_txc_port_dma_enable(np, 1);
	niu_txc_set_imask(np, 0);

	niudbg(IFUP, "%s: Initialize TX channels\n", np->dev->name);
	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &np->tx_rings[i];

		err = niu_init_one_tx_channel(np, rp);
		if (err)
			return err;
	}

	niudbg(IFUP, "%s: Initialize RX channels\n", np->dev->name);
	err = niu_init_rx_channels(np);
	if (err)
		goto out_uninit_tx_channels;

	niudbg(IFUP, "%s: Initialize classifier\n", np->dev->name);
	err = niu_init_classifier_hw(np);
	if (err)
		goto out_uninit_rx_channels;

	niudbg(IFUP, "%s: Initialize ZCP\n", np->dev->name);
	err = niu_init_zcp(np);
	if (err)
		goto out_uninit_rx_channels;

	niudbg(IFUP, "%s: Initialize IPP\n", np->dev->name);
	err = niu_init_ipp(np);
	if (err)
		goto out_uninit_rx_channels;

	niudbg(IFUP, "%s: Initialize MAC\n", np->dev->name);
	err = niu_init_mac(np);
	if (err)
		goto out_uninit_ipp;

	return 0;

	/* Error unwind: each label deliberately falls through to the
	 * next so later stages tear down everything beneath them.
	 */
out_uninit_ipp:
	niudbg(IFUP, "%s: Uninit IPP\n", np->dev->name);
	niu_disable_ipp(np);

out_uninit_rx_channels:
	niudbg(IFUP, "%s: Uninit RX channels\n", np->dev->name);
	niu_stop_rx_channels(np);
	niu_reset_rx_channels(np);

out_uninit_tx_channels:
	niudbg(IFUP, "%s: Uninit TX channels\n", np->dev->name);
	niu_stop_tx_channels(np);
	niu_reset_tx_channels(np);

	return err;
}
6041
/* Quiesce the hardware in dependency order: interrupts first, then the
 * RX MAC (stop packet intake), IPP, and finally stop and reset all
 * DMA channels.  Caller holds np->lock.
 */
static void niu_stop_hw(struct niu *np)
{
	niudbg(IFDOWN, "%s: Disable interrupts\n", np->dev->name);
	niu_enable_interrupts(np, 0);

	niudbg(IFDOWN, "%s: Disable RX MAC\n", np->dev->name);
	niu_enable_rx_mac(np, 0);

	niudbg(IFDOWN, "%s: Disable IPP\n", np->dev->name);
	niu_disable_ipp(np);

	niudbg(IFDOWN, "%s: Stop TX channels\n", np->dev->name);
	niu_stop_tx_channels(np);

	niudbg(IFDOWN, "%s: Stop RX channels\n", np->dev->name);
	niu_stop_rx_channels(np);

	niudbg(IFDOWN, "%s: Reset TX channels\n", np->dev->name);
	niu_reset_tx_channels(np);

	niudbg(IFDOWN, "%s: Reset RX channels\n", np->dev->name);
	niu_reset_rx_channels(np);
}
6065
/* Fill np->irq_name[] with per-LDG interrupt names ("ethX:MAC",
 * "ethX-rx-N", "ethX-tx-N").  Port 0 additionally owns the MIF and
 * SYSERR interrupts, so its per-ring names start at slot 3 rather
 * than slot 1.
 *
 * NOTE(review): sprintf into fixed-size irq_name[] entries assumes the
 * device name plus suffix always fits -- confirm the buffer size in
 * the struct niu definition.
 */
static void niu_set_irq_name(struct niu *np)
{
	int port = np->port;
	int i, j = 1;

	sprintf(np->irq_name[0], "%s:MAC", np->dev->name);

	if (port == 0) {
		sprintf(np->irq_name[1], "%s:MIF", np->dev->name);
		sprintf(np->irq_name[2], "%s:SYSERR", np->dev->name);
		j = 3;
	}

	/* RX rings come first, then TX rings. */
	for (i = 0; i < np->num_ldg - j; i++) {
		if (i < np->num_rx_rings)
			sprintf(np->irq_name[i+j], "%s-rx-%d",
				np->dev->name, i);
		else if (i < np->num_tx_rings + np->num_rx_rings)
			sprintf(np->irq_name[i+j], "%s-tx-%d", np->dev->name,
				i - np->num_rx_rings);
	}
}
6088
/* Request one shared IRQ per logical device group (LDG).  On failure,
 * frees every IRQ acquired so far and returns the errno from
 * request_irq(); returns 0 on success.
 */
static int niu_request_irq(struct niu *np)
{
	int i, j, err;

	niu_set_irq_name(np);

	err = 0;
	for (i = 0; i < np->num_ldg; i++) {
		struct niu_ldg *lp = &np->ldg[i];

		err = request_irq(lp->irq, niu_interrupt,
				  IRQF_SHARED | IRQF_SAMPLE_RANDOM,
				  np->irq_name[i], lp);
		if (err)
			goto out_free_irqs;

	}

	return 0;

out_free_irqs:
	/* Roll back only the IRQs successfully requested (indices < i). */
	for (j = 0; j < i; j++) {
		struct niu_ldg *lp = &np->ldg[j];

		free_irq(lp->irq, lp);
	}
	return err;
}
6117
6118 static void niu_free_irq(struct niu *np)
6119 {
6120         int i;
6121
6122         for (i = 0; i < np->num_ldg; i++) {
6123                 struct niu_ldg *lp = &np->ldg[i];
6124
6125                 free_irq(lp->irq, lp);
6126         }
6127 }
6128
6129 static void niu_enable_napi(struct niu *np)
6130 {
6131         int i;
6132
6133         for (i = 0; i < np->num_ldg; i++)
6134                 napi_enable(&np->ldg[i].napi);
6135 }
6136
6137 static void niu_disable_napi(struct niu *np)
6138 {
6139         int i;
6140
6141         for (i = 0; i < np->num_ldg; i++)
6142                 napi_disable(&np->ldg[i].napi);
6143 }
6144
/* ndo_open: allocate DMA channels, request IRQs, bring the hardware
 * online under np->lock, and arm the 1Hz maintenance timer.  Returns
 * 0 or a negative errno with everything unwound on failure.
 */
static int niu_open(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);
	int err;

	netif_carrier_off(dev);

	err = niu_alloc_channels(np);
	if (err)
		goto out_err;

	/* Keep interrupts masked until init completes. */
	err = niu_enable_interrupts(np, 0);
	if (err)
		goto out_free_channels;

	err = niu_request_irq(np);
	if (err)
		goto out_free_channels;

	niu_enable_napi(np);

	spin_lock_irq(&np->lock);

	err = niu_init_hw(np);
	if (!err) {
		/* Prepare the periodic maintenance timer (fires in 1s). */
		init_timer(&np->timer);
		np->timer.expires = jiffies + HZ;
		np->timer.data = (unsigned long) np;
		np->timer.function = niu_timer;

		err = niu_enable_interrupts(np, 1);
		if (err)
			niu_stop_hw(np);
	}

	spin_unlock_irq(&np->lock);

	if (err) {
		niu_disable_napi(np);
		goto out_free_irq;
	}

	netif_tx_start_all_queues(dev);

	/* In loopback there is no external link to wait for. */
	if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
		netif_carrier_on(dev);

	add_timer(&np->timer);

	return 0;

out_free_irq:
	niu_free_irq(np);

out_free_channels:
	niu_free_channels(np);

out_err:
	return err;
}
6205
/* Take the interface fully offline: cancel any pending reset work,
 * stop NAPI and the TX queues, kill the maintenance timer, then stop
 * the hardware under np->lock.  IRQs and channels are freed by the
 * caller (niu_close / suspend paths).
 */
static void niu_full_shutdown(struct niu *np, struct net_device *dev)
{
	cancel_work_sync(&np->reset_task);

	niu_disable_napi(np);
	netif_tx_stop_all_queues(dev);

	del_timer_sync(&np->timer);

	spin_lock_irq(&np->lock);

	niu_stop_hw(np);

	spin_unlock_irq(&np->lock);
}
6221
/* ndo_stop: quiesce the hardware, release IRQs and DMA channels, and
 * park the link LED.  Always returns 0.
 */
static int niu_close(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);

	niu_full_shutdown(np, dev);
	niu_free_irq(np);
	niu_free_channels(np);
	niu_handle_led(np, 0);

	return 0;
}
6236
/* Accumulate the XMAC hardware counters into the software statistics.
 * NOTE(review): the += pattern implies these hardware counters reset
 * (or are treated as deltas) between reads -- confirm against the
 * counter-clearing writes in niu_init_rx_xmac()/niu_init_tx_xmac().
 */
static void niu_sync_xmac_stats(struct niu *np)
{
	struct niu_xmac_stats *mp = &np->mac_stats.xmac;

	mp->tx_frames += nr64_mac(TXMAC_FRM_CNT);
	mp->tx_bytes += nr64_mac(TXMAC_BYTE_CNT);

	mp->rx_link_faults += nr64_mac(LINK_FAULT_CNT);
	mp->rx_align_errors += nr64_mac(RXMAC_ALIGN_ERR_CNT);
	mp->rx_frags += nr64_mac(RXMAC_FRAG_CNT);
	mp->rx_mcasts += nr64_mac(RXMAC_MC_FRM_CNT);
	mp->rx_bcasts += nr64_mac(RXMAC_BC_FRM_CNT);
	mp->rx_hist_cnt1 += nr64_mac(RXMAC_HIST_CNT1);
	mp->rx_hist_cnt2 += nr64_mac(RXMAC_HIST_CNT2);
	mp->rx_hist_cnt3 += nr64_mac(RXMAC_HIST_CNT3);
	mp->rx_hist_cnt4 += nr64_mac(RXMAC_HIST_CNT4);
	mp->rx_hist_cnt5 += nr64_mac(RXMAC_HIST_CNT5);
	mp->rx_hist_cnt6 += nr64_mac(RXMAC_HIST_CNT6);
	mp->rx_hist_cnt7 += nr64_mac(RXMAC_HIST_CNT7);
	mp->rx_octets += nr64_mac(RXMAC_BT_CNT);
	mp->rx_code_violations += nr64_mac(RXMAC_CD_VIO_CNT);
	mp->rx_len_errors += nr64_mac(RXMAC_MPSZER_CNT);
	mp->rx_crc_errors += nr64_mac(RXMAC_CRC_ER_CNT);
}
6261
6262 static void niu_sync_bmac_stats(struct niu *np)
6263 {
6264         struct niu_bmac_stats *mp = &np->mac_stats.bmac;
6265
6266         mp->tx_bytes += nr64_mac(BTXMAC_BYTE_CNT);
6267         mp->tx_frames += nr64_mac(BTXMAC_FRM_CNT);
6268
6269         mp->rx_frames += nr64_mac(BRXMAC_FRAME_CNT);
6270         mp->rx_align_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
6271         mp->rx_crc_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
6272         mp->rx_len_errors += nr64_mac(BRXMAC_CODE_VIOL_ERR_CNT);
6273 }
6274
6275 static void niu_sync_mac_stats(struct niu *np)
6276 {
6277         if (np->flags & NIU_FLAGS_XMAC)
6278                 niu_sync_xmac_stats(np);
6279         else
6280                 niu_sync_bmac_stats(np);
6281 }
6282
6283 static void niu_get_rx_stats(struct niu *np)
6284 {
6285         unsigned long pkts, dropped, errors, bytes;
6286         int i;
6287
6288         pkts = dropped = errors = bytes = 0;
6289         for (i = 0; i < np->num_rx_rings; i++) {
6290                 struct rx_ring_info *rp = &np->rx_rings[i];
6291
6292                 niu_sync_rx_discard_stats(np, rp, 0);
6293
6294                 pkts += rp->rx_packets;
6295                 bytes += rp->rx_bytes;
6296                 dropped += rp->rx_dropped;
6297                 errors += rp->rx_errors;
6298         }
6299         np->dev->stats.rx_packets = pkts;
6300         np->dev->stats.rx_bytes = bytes;
6301         np->dev->stats.rx_dropped = dropped;
6302         np->dev->stats.rx_errors = errors;
6303 }
6304
6305 static void niu_get_tx_stats(struct niu *np)
6306 {
6307         unsigned long pkts, errors, bytes;
6308         int i;
6309
6310         pkts = errors = bytes = 0;
6311         for (i = 0; i < np->num_tx_rings; i++) {
6312                 struct tx_ring_info *rp = &np->tx_rings[i];
6313
6314                 pkts += rp->tx_packets;
6315                 bytes += rp->tx_bytes;
6316                 errors += rp->tx_errors;
6317         }
6318         np->dev->stats.tx_packets = pkts;
6319         np->dev->stats.tx_bytes = bytes;
6320         np->dev->stats.tx_errors = errors;
6321 }
6322
6323 static struct net_device_stats *niu_get_stats(struct net_device *dev)
6324 {
6325         struct niu *np = netdev_priv(dev);
6326
6327         niu_get_rx_stats(np);
6328         niu_get_tx_stats(np);
6329
6330         return &dev->stats;
6331 }
6332
6333 static void niu_load_hash_xmac(struct niu *np, u16 *hash)
6334 {
6335         int i;
6336
6337         for (i = 0; i < 16; i++)
6338                 nw64_mac(XMAC_HASH_TBL(i), hash[i]);
6339 }
6340
6341 static void niu_load_hash_bmac(struct niu *np, u16 *hash)
6342 {
6343         int i;
6344
6345         for (i = 0; i < 16; i++)
6346                 nw64_mac(BMAC_HASH_TBL(i), hash[i]);
6347 }
6348
6349 static void niu_load_hash(struct niu *np, u16 *hash)
6350 {
6351         if (np->flags & NIU_FLAGS_XMAC)
6352                 niu_load_hash_xmac(np, hash);
6353         else
6354                 niu_load_hash_bmac(np, hash);
6355 }
6356
6357 static void niu_set_rx_mode(struct net_device *dev)
6358 {
6359         struct niu *np = netdev_priv(dev);
6360         int i, alt_cnt, err;
6361         struct dev_addr_list *addr;
6362         struct netdev_hw_addr *ha;
6363         unsigned long flags;
6364         u16 hash[16] = { 0, };
6365
6366         spin_lock_irqsave(&np->lock, flags);
6367         niu_enable_rx_mac(np, 0);
6368
6369         np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC);
6370         if (dev->flags & IFF_PROMISC)
6371                 np->flags |= NIU_FLAGS_PROMISC;
6372         if ((dev->flags & IFF_ALLMULTI) || (!netdev_mc_empty(dev)))
6373                 np->flags |= NIU_FLAGS_MCAST;
6374
6375         alt_cnt = netdev_uc_count(dev);
6376         if (alt_cnt > niu_num_alt_addr(np)) {
6377                 alt_cnt = 0;
6378                 np->flags |= NIU_FLAGS_PROMISC;
6379         }
6380
6381         if (alt_cnt) {
6382                 int index = 0;
6383
6384                 netdev_for_each_uc_addr(ha, dev) {
6385                         err = niu_set_alt_mac(np, index, ha->addr);
6386                         if (err)
6387                                 printk(KERN_WARNING PFX "%s: Error %d "
6388