netdev: add more functions to netdevice ops
linux-2.6.git: drivers/net/niu.c
1 /* niu.c: Neptune ethernet driver.
2  *
3  * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
4  */
5
6 #include <linux/module.h>
7 #include <linux/init.h>
8 #include <linux/pci.h>
9 #include <linux/dma-mapping.h>
10 #include <linux/netdevice.h>
11 #include <linux/ethtool.h>
12 #include <linux/etherdevice.h>
13 #include <linux/platform_device.h>
14 #include <linux/delay.h>
15 #include <linux/bitops.h>
16 #include <linux/mii.h>
17 #include <linux/if_ether.h>
18 #include <linux/if_vlan.h>
19 #include <linux/ip.h>
20 #include <linux/in.h>
21 #include <linux/ipv6.h>
22 #include <linux/log2.h>
23 #include <linux/jiffies.h>
24 #include <linux/crc32.h>
25
26 #include <linux/io.h>
27
28 #ifdef CONFIG_SPARC64
29 #include <linux/of_device.h>
30 #endif
31
32 #include "niu.h"
33
34 #define DRV_MODULE_NAME         "niu"
35 #define PFX DRV_MODULE_NAME     ": "
36 #define DRV_MODULE_VERSION      "1.0"
37 #define DRV_MODULE_RELDATE      "Nov 14, 2008"
38
39 static char version[] __devinitdata =
40         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
41
42 MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
43 MODULE_DESCRIPTION("NIU ethernet driver");
44 MODULE_LICENSE("GPL");
45 MODULE_VERSION(DRV_MODULE_VERSION);
46
47 #ifndef DMA_44BIT_MASK
48 #define DMA_44BIT_MASK  0x00000fffffffffffULL
49 #endif
50
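/* Fallback 64-bit MMIO accessors for platforms that do not provide native
 * readq/writeq: the register is accessed as two 32-bit halves, low word first.
 */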
51 #ifndef readq
52 static u64 readq(void __iomem *reg)
53 {
54         return ((u64) readl(reg)) | (((u64) readl(reg + 4UL)) << 32);
55 }
56
57 static void writeq(u64 val, void __iomem *reg)
58 {
59         writel(val & 0xffffffff, reg);
60         writel(val >> 32, reg + 0x4UL);
61 }
62 #endif
63
64 static struct pci_device_id niu_pci_tbl[] = {
65         {PCI_DEVICE(PCI_VENDOR_ID_SUN, 0xabcd)},
66         {}
67 };
68
69 MODULE_DEVICE_TABLE(pci, niu_pci_tbl);
70
71 #define NIU_TX_TIMEOUT                  (5 * HZ)
72
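/* Register access helpers: nr64/nw64 hit the global register block, while the
 * _mac, _ipp, _pcs and _xpcs variants address the MAC registers and the IPP,
 * PCS and XPCS blocks at their per-device offsets.
 */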
73 #define nr64(reg)               readq(np->regs + (reg))
74 #define nw64(reg, val)          writeq((val), np->regs + (reg))
75
76 #define nr64_mac(reg)           readq(np->mac_regs + (reg))
77 #define nw64_mac(reg, val)      writeq((val), np->mac_regs + (reg))
78
79 #define nr64_ipp(reg)           readq(np->regs + np->ipp_off + (reg))
80 #define nw64_ipp(reg, val)      writeq((val), np->regs + np->ipp_off + (reg))
81
82 #define nr64_pcs(reg)           readq(np->regs + np->pcs_off + (reg))
83 #define nw64_pcs(reg, val)      writeq((val), np->regs + np->pcs_off + (reg))
84
85 #define nr64_xpcs(reg)          readq(np->regs + np->xpcs_off + (reg))
86 #define nw64_xpcs(reg, val)     writeq((val), np->regs + np->xpcs_off + (reg))
87
88 #define NIU_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
89
90 static int niu_debug;
91 static int debug = -1;
92 module_param(debug, int, 0);
93 MODULE_PARM_DESC(debug, "NIU debug level");
94
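/* Logging macros gated on the per-device msg_enable mask. */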
95 #define niudbg(TYPE, f, a...) \
96 do {    if ((np)->msg_enable & NETIF_MSG_##TYPE) \
97                 printk(KERN_DEBUG PFX f, ## a); \
98 } while (0)
99
100 #define niuinfo(TYPE, f, a...) \
101 do {    if ((np)->msg_enable & NETIF_MSG_##TYPE) \
102                 printk(KERN_INFO PFX f, ## a); \
103 } while (0)
104
105 #define niuwarn(TYPE, f, a...) \
106 do {    if ((np)->msg_enable & NETIF_MSG_##TYPE) \
107                 printk(KERN_WARNING PFX f, ## a); \
108 } while (0)
109
110 #define niu_lock_parent(np, flags) \
111         spin_lock_irqsave(&np->parent->lock, flags)
112 #define niu_unlock_parent(np, flags) \
113         spin_unlock_irqrestore(&np->parent->lock, flags)
114
115 static int serdes_init_10g_serdes(struct niu *np);
116
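/* Register polling helpers: each waits for the given bits to clear (MAC, IPP
 * or global variants), checking up to 'limit' times with 'delay' microseconds
 * between reads, and returns -ENODEV on timeout.  The set_and_wait variants
 * first write the bits and then wait for the hardware to clear them.
 */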
117 static int __niu_wait_bits_clear_mac(struct niu *np, unsigned long reg,
118                                      u64 bits, int limit, int delay)
119 {
120         while (--limit >= 0) {
121                 u64 val = nr64_mac(reg);
122
123                 if (!(val & bits))
124                         break;
125                 udelay(delay);
126         }
127         if (limit < 0)
128                 return -ENODEV;
129         return 0;
130 }
131
132 static int __niu_set_and_wait_clear_mac(struct niu *np, unsigned long reg,
133                                         u64 bits, int limit, int delay,
134                                         const char *reg_name)
135 {
136         int err;
137
138         nw64_mac(reg, bits);
139         err = __niu_wait_bits_clear_mac(np, reg, bits, limit, delay);
140         if (err)
141                 dev_err(np->device, PFX "%s: bits (%llx) of register %s "
142                         "would not clear, val[%llx]\n",
143                         np->dev->name, (unsigned long long) bits, reg_name,
144                         (unsigned long long) nr64_mac(reg));
145         return err;
146 }
147
148 #define niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
149 ({      BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
150         __niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
151 })
152
153 static int __niu_wait_bits_clear_ipp(struct niu *np, unsigned long reg,
154                                      u64 bits, int limit, int delay)
155 {
156         while (--limit >= 0) {
157                 u64 val = nr64_ipp(reg);
158
159                 if (!(val & bits))
160                         break;
161                 udelay(delay);
162         }
163         if (limit < 0)
164                 return -ENODEV;
165         return 0;
166 }
167
168 static int __niu_set_and_wait_clear_ipp(struct niu *np, unsigned long reg,
169                                         u64 bits, int limit, int delay,
170                                         const char *reg_name)
171 {
172         int err;
173         u64 val;
174
175         val = nr64_ipp(reg);
176         val |= bits;
177         nw64_ipp(reg, val);
178
179         err = __niu_wait_bits_clear_ipp(np, reg, bits, limit, delay);
180         if (err)
181                 dev_err(np->device, PFX "%s: bits (%llx) of register %s "
182                         "would not clear, val[%llx]\n",
183                         np->dev->name, (unsigned long long) bits, reg_name,
184                         (unsigned long long) nr64_ipp(reg));
185         return err;
186 }
187
188 #define niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
189 ({      BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
190         __niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
191 })
192
193 static int __niu_wait_bits_clear(struct niu *np, unsigned long reg,
194                                  u64 bits, int limit, int delay)
195 {
196         while (--limit >= 0) {
197                 u64 val = nr64(reg);
198
199                 if (!(val & bits))
200                         break;
201                 udelay(delay);
202         }
203         if (limit < 0)
204                 return -ENODEV;
205         return 0;
206 }
207
208 #define niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY) \
209 ({      BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
210         __niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY); \
211 })
212
213 static int __niu_set_and_wait_clear(struct niu *np, unsigned long reg,
214                                     u64 bits, int limit, int delay,
215                                     const char *reg_name)
216 {
217         int err;
218
219         nw64(reg, bits);
220         err = __niu_wait_bits_clear(np, reg, bits, limit, delay);
221         if (err)
222                 dev_err(np->device, PFX "%s: bits (%llx) of register %s "
223                         "would not clear, val[%llx]\n",
224                         np->dev->name, (unsigned long long) bits, reg_name,
225                         (unsigned long long) nr64(reg));
226         return err;
227 }
228
229 #define niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
230 ({      BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
231         __niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
232 })
233
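/* Logical device group (LDG) interrupt handling: rearming writes the group's
 * timer value (plus the ARM bit when enabling) to its interrupt management
 * register; niu_ldn_irq_enable masks or unmasks one logical device number.
 */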
234 static void niu_ldg_rearm(struct niu *np, struct niu_ldg *lp, int on)
235 {
236         u64 val = (u64) lp->timer;
237
238         if (on)
239                 val |= LDG_IMGMT_ARM;
240
241         nw64(LDG_IMGMT(lp->ldg_num), val);
242 }
243
244 static int niu_ldn_irq_enable(struct niu *np, int ldn, int on)
245 {
246         unsigned long mask_reg, bits;
247         u64 val;
248
249         if (ldn < 0 || ldn > LDN_MAX)
250                 return -EINVAL;
251
252         if (ldn < 64) {
253                 mask_reg = LD_IM0(ldn);
254                 bits = LD_IM0_MASK;
255         } else {
256                 mask_reg = LD_IM1(ldn - 64);
257                 bits = LD_IM1_MASK;
258         }
259
260         val = nr64(mask_reg);
261         if (on)
262                 val &= ~bits;
263         else
264                 val |= bits;
265         nw64(mask_reg, val);
266
267         return 0;
268 }
269
270 static int niu_enable_ldn_in_ldg(struct niu *np, struct niu_ldg *lp, int on)
271 {
272         struct niu_parent *parent = np->parent;
273         int i;
274
275         for (i = 0; i <= LDN_MAX; i++) {
276                 int err;
277
278                 if (parent->ldg_map[i] != lp->ldg_num)
279                         continue;
280
281                 err = niu_ldn_irq_enable(np, i, on);
282                 if (err)
283                         return err;
284         }
285         return 0;
286 }
287
288 static int niu_enable_interrupts(struct niu *np, int on)
289 {
290         int i;
291
292         for (i = 0; i < np->num_ldg; i++) {
293                 struct niu_ldg *lp = &np->ldg[i];
294                 int err;
295
296                 err = niu_enable_ldn_in_ldg(np, lp, on);
297                 if (err)
298                         return err;
299         }
300         for (i = 0; i < np->num_ldg; i++)
301                 niu_ldg_rearm(np, &np->ldg[i], on);
302
303         return 0;
304 }
305
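/* The parent's port_phy word stores a small PHY-type field for each port;
 * these helpers pack and extract it using a two-bit-per-port layout.
 */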
306 static u32 phy_encode(u32 type, int port)
307 {
308         return (type << (port * 2));
309 }
310
311 static u32 phy_decode(u32 val, int port)
312 {
313         return (val >> (port * 2)) & PORT_TYPE_MASK;
314 }
315
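/* MDIO/MII access goes through MIF_FRAME_OUTPUT; mdio_wait() polls for the
 * turnaround bit and returns the frame's data field, or -ENODEV on timeout.
 * The mdio_* helpers issue an address frame followed by the data frame,
 * while the mii_* helpers use single-frame (clause 22 style) operations.
 */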
316 static int mdio_wait(struct niu *np)
317 {
318         int limit = 1000;
319         u64 val;
320
321         while (--limit > 0) {
322                 val = nr64(MIF_FRAME_OUTPUT);
323                 if ((val >> MIF_FRAME_OUTPUT_TA_SHIFT) & 0x1)
324                         return val & MIF_FRAME_OUTPUT_DATA;
325
326                 udelay(10);
327         }
328
329         return -ENODEV;
330 }
331
332 static int mdio_read(struct niu *np, int port, int dev, int reg)
333 {
334         int err;
335
336         nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
337         err = mdio_wait(np);
338         if (err < 0)
339                 return err;
340
341         nw64(MIF_FRAME_OUTPUT, MDIO_READ_OP(port, dev));
342         return mdio_wait(np);
343 }
344
345 static int mdio_write(struct niu *np, int port, int dev, int reg, int data)
346 {
347         int err;
348
349         nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
350         err = mdio_wait(np);
351         if (err < 0)
352                 return err;
353
354         nw64(MIF_FRAME_OUTPUT, MDIO_WRITE_OP(port, dev, data));
355         err = mdio_wait(np);
356         if (err < 0)
357                 return err;
358
359         return 0;
360 }
361
362 static int mii_read(struct niu *np, int port, int reg)
363 {
364         nw64(MIF_FRAME_OUTPUT, MII_READ_OP(port, reg));
365         return mdio_wait(np);
366 }
367
368 static int mii_write(struct niu *np, int port, int reg, int data)
369 {
370         int err;
371
372         nw64(MIF_FRAME_OUTPUT, MII_WRITE_OP(port, reg, data));
373         err = mdio_wait(np);
374         if (err < 0)
375                 return err;
376
377         return 0;
378 }
379
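/* The 32-bit ESR2 (TI) PLL TX/RX lane configuration values are written as
 * two 16-bit MDIO registers, low half then high half.
 */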
380 static int esr2_set_tx_cfg(struct niu *np, unsigned long channel, u32 val)
381 {
382         int err;
383
384         err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
385                          ESR2_TI_PLL_TX_CFG_L(channel),
386                          val & 0xffff);
387         if (!err)
388                 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
389                                  ESR2_TI_PLL_TX_CFG_H(channel),
390                                  val >> 16);
391         return err;
392 }
393
394 static int esr2_set_rx_cfg(struct niu *np, unsigned long channel, u32 val)
395 {
396         int err;
397
398         err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
399                          ESR2_TI_PLL_RX_CFG_L(channel),
400                          val & 0xffff);
401         if (!err)
402                 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
403                                  ESR2_TI_PLL_RX_CFG_H(channel),
404                                  val >> 16);
405         return err;
406 }
407
408 /* Mode is always 10G fiber.  */
409 static int serdes_init_niu_10g_fiber(struct niu *np)
410 {
411         struct niu_link_config *lp = &np->link_config;
412         u32 tx_cfg, rx_cfg;
413         unsigned long i;
414
415         tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
416         rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
417                   PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
418                   PLL_RX_CFG_EQ_LP_ADAPTIVE);
419
420         if (lp->loopback_mode == LOOPBACK_PHY) {
421                 u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;
422
423                 mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
424                            ESR2_TI_PLL_TEST_CFG_L, test_cfg);
425
426                 tx_cfg |= PLL_TX_CFG_ENTEST;
427                 rx_cfg |= PLL_RX_CFG_ENTEST;
428         }
429
430         /* Initialize all 4 lanes of the SERDES.  */
431         for (i = 0; i < 4; i++) {
432                 int err = esr2_set_tx_cfg(np, i, tx_cfg);
433                 if (err)
434                         return err;
435         }
436
437         for (i = 0; i < 4; i++) {
438                 int err = esr2_set_rx_cfg(np, i, rx_cfg);
439                 if (err)
440                         return err;
441         }
442
443         return 0;
444 }
445
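/* Bring the NIU SERDES up at 1G: program the PLL for 8x multiply at half
 * rate, write the TX/RX configuration to all four lanes, then poll
 * ESR_INT_SIGNALS until the port's ready/detect bits appear.
 */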
446 static int serdes_init_niu_1g_serdes(struct niu *np)
447 {
448         struct niu_link_config *lp = &np->link_config;
449         u16 pll_cfg, pll_sts;
450         int max_retry = 100;
451         u64 sig, mask, val;
452         u32 tx_cfg, rx_cfg;
453         unsigned long i;
454         int err;
455
456         tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV |
457                   PLL_TX_CFG_RATE_HALF);
458         rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
459                   PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
460                   PLL_RX_CFG_RATE_HALF);
461
462         if (np->port == 0)
463                 rx_cfg |= PLL_RX_CFG_EQ_LP_ADAPTIVE;
464
465         if (lp->loopback_mode == LOOPBACK_PHY) {
466                 u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;
467
468                 mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
469                            ESR2_TI_PLL_TEST_CFG_L, test_cfg);
470
471                 tx_cfg |= PLL_TX_CFG_ENTEST;
472                 rx_cfg |= PLL_RX_CFG_ENTEST;
473         }
474
475         /* Initialize PLL for 1G */
476         pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_8X);
477
478         err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
479                          ESR2_TI_PLL_CFG_L, pll_cfg);
480         if (err) {
481                 dev_err(np->device, PFX "NIU Port %d "
482                         "serdes_init_niu_1g_serdes: "
483                         "mdio write to ESR2_TI_PLL_CFG_L failed\n", np->port);
484                 return err;
485         }
486
487         pll_sts = PLL_CFG_ENPLL;
488
489         err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
490                          ESR2_TI_PLL_STS_L, pll_sts);
491         if (err) {
492                 dev_err(np->device, PFX "NIU Port %d "
493                         "serdes_init_niu_1g_serdes: "
494                         "mdio write to ESR2_TI_PLL_STS_L failed\n", np->port);
495                 return err;
496         }
497
498         udelay(200);
499
500         /* Initialize all 4 lanes of the SERDES.  */
501         for (i = 0; i < 4; i++) {
502                 err = esr2_set_tx_cfg(np, i, tx_cfg);
503                 if (err)
504                         return err;
505         }
506
507         for (i = 0; i < 4; i++) {
508                 err = esr2_set_rx_cfg(np, i, rx_cfg);
509                 if (err)
510                         return err;
511         }
512
513         switch (np->port) {
514         case 0:
515                 val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
516                 mask = val;
517                 break;
518
519         case 1:
520                 val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
521                 mask = val;
522                 break;
523
524         default:
525                 return -EINVAL;
526         }
527
528         while (max_retry--) {
529                 sig = nr64(ESR_INT_SIGNALS);
530                 if ((sig & mask) == val)
531                         break;
532
533                 mdelay(500);
534         }
535
536         if ((sig & mask) != val) {
537                 dev_err(np->device, PFX "Port %u signal bits [%08x] are not "
538                         "[%08x]\n", np->port, (int) (sig & mask), (int) val);
539                 return -ENODEV;
540         }
541
542         return 0;
543 }
544
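/* 10G counterpart of the routine above: the PLL runs at 10x multiply and
 * full rate; if the expected signal bits never show up, fall back to 1G
 * initialization and clear NIU_FLAGS_10G.
 */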
545 static int serdes_init_niu_10g_serdes(struct niu *np)
546 {
547         struct niu_link_config *lp = &np->link_config;
548         u32 tx_cfg, rx_cfg, pll_cfg, pll_sts;
549         int max_retry = 100;
550         u64 sig, mask, val;
551         unsigned long i;
552         int err;
553
554         tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
555         rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
556                   PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
557                   PLL_RX_CFG_EQ_LP_ADAPTIVE);
558
559         if (lp->loopback_mode == LOOPBACK_PHY) {
560                 u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;
561
562                 mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
563                            ESR2_TI_PLL_TEST_CFG_L, test_cfg);
564
565                 tx_cfg |= PLL_TX_CFG_ENTEST;
566                 rx_cfg |= PLL_RX_CFG_ENTEST;
567         }
568
569         /* Initialize PLL for 10G */
570         pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_10X);
571
572         err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
573                          ESR2_TI_PLL_CFG_L, pll_cfg & 0xffff);
574         if (err) {
575                 dev_err(np->device, PFX "NIU Port %d "
576                         "serdes_init_niu_10g_serdes: "
577                         "mdio write to ESR2_TI_PLL_CFG_L failed\n", np->port);
578                 return err;
579         }
580
581         pll_sts = PLL_CFG_ENPLL;
582
583         err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
584                          ESR2_TI_PLL_STS_L, pll_sts & 0xffff);
585         if (err) {
586                 dev_err(np->device, PFX "NIU Port %d "
587                         "serdes_init_niu_10g_serdes: "
588                         "mdio write to ESR2_TI_PLL_STS_L failed\n", np->port);
589                 return err;
590         }
591
592         udelay(200);
593
594         /* Initialize all 4 lanes of the SERDES.  */
595         for (i = 0; i < 4; i++) {
596                 err = esr2_set_tx_cfg(np, i, tx_cfg);
597                 if (err)
598                         return err;
599         }
600
601         for (i = 0; i < 4; i++) {
602                 err = esr2_set_rx_cfg(np, i, rx_cfg);
603                 if (err)
604                         return err;
605         }
606
607         /* check if serdes is ready */
608
609         switch (np->port) {
610         case 0:
611                 mask = ESR_INT_SIGNALS_P0_BITS;
612                 val = (ESR_INT_SRDY0_P0 |
613                        ESR_INT_DET0_P0 |
614                        ESR_INT_XSRDY_P0 |
615                        ESR_INT_XDP_P0_CH3 |
616                        ESR_INT_XDP_P0_CH2 |
617                        ESR_INT_XDP_P0_CH1 |
618                        ESR_INT_XDP_P0_CH0);
619                 break;
620
621         case 1:
622                 mask = ESR_INT_SIGNALS_P1_BITS;
623                 val = (ESR_INT_SRDY0_P1 |
624                        ESR_INT_DET0_P1 |
625                        ESR_INT_XSRDY_P1 |
626                        ESR_INT_XDP_P1_CH3 |
627                        ESR_INT_XDP_P1_CH2 |
628                        ESR_INT_XDP_P1_CH1 |
629                        ESR_INT_XDP_P1_CH0);
630                 break;
631
632         default:
633                 return -EINVAL;
634         }
635
636         while (max_retry--) {
637                 sig = nr64(ESR_INT_SIGNALS);
638                 if ((sig & mask) == val)
639                         break;
640
641                 mdelay(500);
642         }
643
644         if ((sig & mask) != val) {
645                 pr_info(PFX "NIU Port %u signal bits [%08x] are not "
646                         "[%08x] for 10G...trying 1G\n",
647                         np->port, (int) (sig & mask), (int) val);
648
649                 /* 10G failed, try initializing at 1G */
650                 err = serdes_init_niu_1g_serdes(np);
651                 if (!err) {
652                         np->flags &= ~NIU_FLAGS_10G;
653                         np->mac_xcvr = MAC_XCVR_PCS;
654                 } else {
655                         dev_err(np->device, PFX "Port %u 10G/1G SERDES "
656                                 "Link Failed\n", np->port);
657                         return -ENODEV;
658                 }
659         }
660         return 0;
661 }
662
663 static int esr_read_rxtx_ctrl(struct niu *np, unsigned long chan, u32 *val)
664 {
665         int err;
666
667         err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_L(chan));
668         if (err >= 0) {
669                 *val = (err & 0xffff);
670                 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
671                                 ESR_RXTX_CTRL_H(chan));
672                 if (err >= 0)
673                         *val |= ((err & 0xffff) << 16);
674                 err = 0;
675         }
676         return err;
677 }
678
679 static int esr_read_glue0(struct niu *np, unsigned long chan, u32 *val)
680 {
681         int err;
682
683         err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
684                         ESR_GLUE_CTRL0_L(chan));
685         if (err >= 0) {
686                 *val = (err & 0xffff);
687                 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
688                                 ESR_GLUE_CTRL0_H(chan));
689                 if (err >= 0) {
690                         *val |= ((err & 0xffff) << 16);
691                         err = 0;
692                 }
693         }
694         return err;
695 }
696
697 static int esr_read_reset(struct niu *np, u32 *val)
698 {
699         int err;
700
701         err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
702                         ESR_RXTX_RESET_CTRL_L);
703         if (err >= 0) {
704                 *val = (err & 0xffff);
705                 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
706                                 ESR_RXTX_RESET_CTRL_H);
707                 if (err >= 0) {
708                         *val |= ((err & 0xffff) << 16);
709                         err = 0;
710                 }
711         }
712         return err;
713 }
714
715 static int esr_write_rxtx_ctrl(struct niu *np, unsigned long chan, u32 val)
716 {
717         int err;
718
719         err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
720                          ESR_RXTX_CTRL_L(chan), val & 0xffff);
721         if (!err)
722                 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
723                                  ESR_RXTX_CTRL_H(chan), (val >> 16));
724         return err;
725 }
726
727 static int esr_write_glue0(struct niu *np, unsigned long chan, u32 val)
728 {
729         int err;
730
731         err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
732                         ESR_GLUE_CTRL0_L(chan), val & 0xffff);
733         if (!err)
734                 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
735                                  ESR_GLUE_CTRL0_H(chan), (val >> 16));
736         return err;
737 }
738
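/* Reset the ESR RX/TX blocks by toggling the low/high reset-control
 * registers, then verify that the reset register reads back as zero.
 */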
739 static int esr_reset(struct niu *np)
740 {
741         u32 reset;
742         int err;
743
744         err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
745                          ESR_RXTX_RESET_CTRL_L, 0x0000);
746         if (err)
747                 return err;
748         err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
749                          ESR_RXTX_RESET_CTRL_H, 0xffff);
750         if (err)
751                 return err;
752         udelay(200);
753
754         err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
755                          ESR_RXTX_RESET_CTRL_L, 0xffff);
756         if (err)
757                 return err;
758         udelay(200);
759
760         err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
761                          ESR_RXTX_RESET_CTRL_H, 0x0000);
762         if (err)
763                 return err;
764         udelay(200);
765
766         err = esr_read_reset(np, &reset);
767         if (err)
768                 return err;
769         if (reset != 0) {
770                 dev_err(np->device, PFX "Port %u ESR_RESET "
771                         "did not clear [%08x]\n",
772                         np->port, reset);
773                 return -ENODEV;
774         }
775
776         return 0;
777 }
778
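/* SERDES init for ports driven through the ENET_SERDES control/test
 * registers: enable signal detect and set per-lane emphasis/load adjust,
 * optionally select pad loopback, tune each lane's RXTX and glue settings,
 * reset the ESR block and finally check the port's signal bits (a missing
 * hot-pluggable PHY is tolerated).
 */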
779 static int serdes_init_10g(struct niu *np)
780 {
781         struct niu_link_config *lp = &np->link_config;
782         unsigned long ctrl_reg, test_cfg_reg, i;
783         u64 ctrl_val, test_cfg_val, sig, mask, val;
784         int err;
785
786         switch (np->port) {
787         case 0:
788                 ctrl_reg = ENET_SERDES_0_CTRL_CFG;
789                 test_cfg_reg = ENET_SERDES_0_TEST_CFG;
790                 break;
791         case 1:
792                 ctrl_reg = ENET_SERDES_1_CTRL_CFG;
793                 test_cfg_reg = ENET_SERDES_1_TEST_CFG;
794                 break;
795
796         default:
797                 return -EINVAL;
798         }
799         ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
800                     ENET_SERDES_CTRL_SDET_1 |
801                     ENET_SERDES_CTRL_SDET_2 |
802                     ENET_SERDES_CTRL_SDET_3 |
803                     (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
804                     (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
805                     (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
806                     (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
807                     (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
808                     (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
809                     (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
810                     (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
811         test_cfg_val = 0;
812
813         if (lp->loopback_mode == LOOPBACK_PHY) {
814                 test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
815                                   ENET_SERDES_TEST_MD_0_SHIFT) |
816                                  (ENET_TEST_MD_PAD_LOOPBACK <<
817                                   ENET_SERDES_TEST_MD_1_SHIFT) |
818                                  (ENET_TEST_MD_PAD_LOOPBACK <<
819                                   ENET_SERDES_TEST_MD_2_SHIFT) |
820                                  (ENET_TEST_MD_PAD_LOOPBACK <<
821                                   ENET_SERDES_TEST_MD_3_SHIFT));
822         }
823
824         nw64(ctrl_reg, ctrl_val);
825         nw64(test_cfg_reg, test_cfg_val);
826
827         /* Initialize all 4 lanes of the SERDES.  */
828         for (i = 0; i < 4; i++) {
829                 u32 rxtx_ctrl, glue0;
830
831                 err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
832                 if (err)
833                         return err;
834                 err = esr_read_glue0(np, i, &glue0);
835                 if (err)
836                         return err;
837
838                 rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
839                 rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
840                               (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));
841
842                 glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
843                            ESR_GLUE_CTRL0_THCNT |
844                            ESR_GLUE_CTRL0_BLTIME);
845                 glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
846                           (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
847                           (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
848                           (BLTIME_300_CYCLES <<
849                            ESR_GLUE_CTRL0_BLTIME_SHIFT));
850
851                 err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
852                 if (err)
853                         return err;
854                 err = esr_write_glue0(np, i, glue0);
855                 if (err)
856                         return err;
857         }
858
859         err = esr_reset(np);
860         if (err)
861                 return err;
862
863         sig = nr64(ESR_INT_SIGNALS);
864         switch (np->port) {
865         case 0:
866                 mask = ESR_INT_SIGNALS_P0_BITS;
867                 val = (ESR_INT_SRDY0_P0 |
868                        ESR_INT_DET0_P0 |
869                        ESR_INT_XSRDY_P0 |
870                        ESR_INT_XDP_P0_CH3 |
871                        ESR_INT_XDP_P0_CH2 |
872                        ESR_INT_XDP_P0_CH1 |
873                        ESR_INT_XDP_P0_CH0);
874                 break;
875
876         case 1:
877                 mask = ESR_INT_SIGNALS_P1_BITS;
878                 val = (ESR_INT_SRDY0_P1 |
879                        ESR_INT_DET0_P1 |
880                        ESR_INT_XSRDY_P1 |
881                        ESR_INT_XDP_P1_CH3 |
882                        ESR_INT_XDP_P1_CH2 |
883                        ESR_INT_XDP_P1_CH1 |
884                        ESR_INT_XDP_P1_CH0);
885                 break;
886
887         default:
888                 return -EINVAL;
889         }
890
891         if ((sig & mask) != val) {
892                 if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
893                         np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
894                         return 0;
895                 }
896                 dev_err(np->device, PFX "Port %u signal bits [%08x] are not "
897                         "[%08x]\n", np->port, (int) (sig & mask), (int) val);
898                 return -ENODEV;
899         }
900         if (np->flags & NIU_FLAGS_HOTPLUG_PHY)
901                 np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
902         return 0;
903 }
904
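/* For 1G operation only the SERDES PLL needs adjusting: clear the FBDIV2
 * feedback divider and select the half-rate clock for this port.
 */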
905 static int serdes_init_1g(struct niu *np)
906 {
907         u64 val;
908
909         val = nr64(ENET_SERDES_1_PLL_CFG);
910         val &= ~ENET_SERDES_PLL_FBDIV2;
911         switch (np->port) {
912         case 0:
913                 val |= ENET_SERDES_PLL_HRATE0;
914                 break;
915         case 1:
916                 val |= ENET_SERDES_PLL_HRATE1;
917                 break;
918         case 2:
919                 val |= ENET_SERDES_PLL_HRATE2;
920                 break;
921         case 3:
922                 val |= ENET_SERDES_PLL_HRATE3;
923                 break;
924         default:
925                 return -EINVAL;
926         }
927         nw64(ENET_SERDES_1_PLL_CFG, val);
928
929         return 0;
930 }
931
932 static int serdes_init_1g_serdes(struct niu *np)
933 {
934         struct niu_link_config *lp = &np->link_config;
935         unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
936         u64 ctrl_val, test_cfg_val, sig, mask, val;
937         int err;
938         u64 reset_val, val_rd;
939
940         val = ENET_SERDES_PLL_HRATE0 | ENET_SERDES_PLL_HRATE1 |
941                 ENET_SERDES_PLL_HRATE2 | ENET_SERDES_PLL_HRATE3 |
942                 ENET_SERDES_PLL_FBDIV0;
943         switch (np->port) {
944         case 0:
945                 reset_val =  ENET_SERDES_RESET_0;
946                 ctrl_reg = ENET_SERDES_0_CTRL_CFG;
947                 test_cfg_reg = ENET_SERDES_0_TEST_CFG;
948                 pll_cfg = ENET_SERDES_0_PLL_CFG;
949                 break;
950         case 1:
951                 reset_val =  ENET_SERDES_RESET_1;
952                 ctrl_reg = ENET_SERDES_1_CTRL_CFG;
953                 test_cfg_reg = ENET_SERDES_1_TEST_CFG;
954                 pll_cfg = ENET_SERDES_1_PLL_CFG;
955                 break;
956
957         default:
958                 return -EINVAL;
959         }
960         ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
961                     ENET_SERDES_CTRL_SDET_1 |
962                     ENET_SERDES_CTRL_SDET_2 |
963                     ENET_SERDES_CTRL_SDET_3 |
964                     (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
965                     (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
966                     (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
967                     (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
968                     (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
969                     (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
970                     (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
971                     (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
972         test_cfg_val = 0;
973
974         if (lp->loopback_mode == LOOPBACK_PHY) {
975                 test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
976                                   ENET_SERDES_TEST_MD_0_SHIFT) |
977                                  (ENET_TEST_MD_PAD_LOOPBACK <<
978                                   ENET_SERDES_TEST_MD_1_SHIFT) |
979                                  (ENET_TEST_MD_PAD_LOOPBACK <<
980                                   ENET_SERDES_TEST_MD_2_SHIFT) |
981                                  (ENET_TEST_MD_PAD_LOOPBACK <<
982                                   ENET_SERDES_TEST_MD_3_SHIFT));
983         }
984
985         nw64(ENET_SERDES_RESET, reset_val);
986         mdelay(20);
987         val_rd = nr64(ENET_SERDES_RESET);
988         val_rd &= ~reset_val;
989         nw64(pll_cfg, val);
990         nw64(ctrl_reg, ctrl_val);
991         nw64(test_cfg_reg, test_cfg_val);
992         nw64(ENET_SERDES_RESET, val_rd);
993         mdelay(2000);
994
995         /* Initialize all 4 lanes of the SERDES.  */
996         for (i = 0; i < 4; i++) {
997                 u32 rxtx_ctrl, glue0;
998
999                 err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
1000                 if (err)
1001                         return err;
1002                 err = esr_read_glue0(np, i, &glue0);
1003                 if (err)
1004                         return err;
1005
1006                 rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
1007                 rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
1008                               (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));
1009
1010                 glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
1011                            ESR_GLUE_CTRL0_THCNT |
1012                            ESR_GLUE_CTRL0_BLTIME);
1013                 glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
1014                           (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
1015                           (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
1016                           (BLTIME_300_CYCLES <<
1017                            ESR_GLUE_CTRL0_BLTIME_SHIFT));
1018
1019                 err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
1020                 if (err)
1021                         return err;
1022                 err = esr_write_glue0(np, i, glue0);
1023                 if (err)
1024                         return err;
1025         }
1026
1027
1028         sig = nr64(ESR_INT_SIGNALS);
1029         switch (np->port) {
1030         case 0:
1031                 val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
1032                 mask = val;
1033                 break;
1034
1035         case 1:
1036                 val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
1037                 mask = val;
1038                 break;
1039
1040         default:
1041                 return -EINVAL;
1042         }
1043
1044         if ((sig & mask) != val) {
1045                 dev_err(np->device, PFX "Port %u signal bits [%08x] are not "
1046                         "[%08x]\n", np->port, (int) (sig & mask), (int) val);
1047                 return -ENODEV;
1048         }
1049
1050         return 0;
1051 }
1052
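/* 1G SERDES link status: a set LINK_STATUS bit in PCS_MII_STAT means the
 * link is up at 1000 Mb/s full duplex.
 */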
1053 static int link_status_1g_serdes(struct niu *np, int *link_up_p)
1054 {
1055         struct niu_link_config *lp = &np->link_config;
1056         int link_up;
1057         u64 val;
1058         u16 current_speed;
1059         unsigned long flags;
1060         u8 current_duplex;
1061
1062         link_up = 0;
1063         current_speed = SPEED_INVALID;
1064         current_duplex = DUPLEX_INVALID;
1065
1066         spin_lock_irqsave(&np->lock, flags);
1067
1068         val = nr64_pcs(PCS_MII_STAT);
1069
1070         if (val & PCS_MII_STAT_LINK_STATUS) {
1071                 link_up = 1;
1072                 current_speed = SPEED_1000;
1073                 current_duplex = DUPLEX_FULL;
1074         }
1075
1076         lp->active_speed = current_speed;
1077         lp->active_duplex = current_duplex;
1078         spin_unlock_irqrestore(&np->lock, flags);
1079
1080         *link_up_p = link_up;
1081         return 0;
1082 }
1083
1084 static int link_status_10g_serdes(struct niu *np, int *link_up_p)
1085 {
1086         unsigned long flags;
1087         struct niu_link_config *lp = &np->link_config;
1088         int link_up = 0;
1089         int link_ok = 1;
1090         u64 val, val2;
1091         u16 current_speed;
1092         u8 current_duplex;
1093
1094         if (!(np->flags & NIU_FLAGS_10G))
1095                 return link_status_1g_serdes(np, link_up_p);
1096
1097         current_speed = SPEED_INVALID;
1098         current_duplex = DUPLEX_INVALID;
1099         spin_lock_irqsave(&np->lock, flags);
1100
1101         val = nr64_xpcs(XPCS_STATUS(0));
1102         val2 = nr64_mac(XMAC_INTER2);
1103         if (val2 & 0x01000000)
1104                 link_ok = 0;
1105
1106         if ((val & 0x1000ULL) && link_ok) {
1107                 link_up = 1;
1108                 current_speed = SPEED_10000;
1109                 current_duplex = DUPLEX_FULL;
1110         }
1111         lp->active_speed = current_speed;
1112         lp->active_duplex = current_duplex;
1113         spin_unlock_irqrestore(&np->lock, flags);
1114         *link_up_p = link_up;
1115         return 0;
1116 }
1117
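/* RGMII copper link status, derived from the MII BMSR (plus the
 * advertisement and partner ability registers); a link that is up is
 * always reported as 1000 Mb/s full duplex here.
 */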
1118 static int link_status_1g_rgmii(struct niu *np, int *link_up_p)
1119 {
1120         struct niu_link_config *lp = &np->link_config;
1121         u16 current_speed, bmsr;
1122         unsigned long flags;
1123         u8 current_duplex;
1124         int err, link_up;
1125
1126         link_up = 0;
1127         current_speed = SPEED_INVALID;
1128         current_duplex = DUPLEX_INVALID;
1129
1130         spin_lock_irqsave(&np->lock, flags);
1131
1132         err = -EINVAL;
1133
1134         err = mii_read(np, np->phy_addr, MII_BMSR);
1135         if (err < 0)
1136                 goto out;
1137
1138         bmsr = err;
1139         if (bmsr & BMSR_LSTATUS) {
1140                 u16 adv, lpa, common, estat;
1141
1142                 err = mii_read(np, np->phy_addr, MII_ADVERTISE);
1143                 if (err < 0)
1144                         goto out;
1145                 adv = err;
1146
1147                 err = mii_read(np, np->phy_addr, MII_LPA);
1148                 if (err < 0)
1149                         goto out;
1150                 lpa = err;
1151
1152                 common = adv & lpa;
1153
1154                 err = mii_read(np, np->phy_addr, MII_ESTATUS);
1155                 if (err < 0)
1156                         goto out;
1157                 estat = err;
1158                 link_up = 1;
1159                 current_speed = SPEED_1000;
1160                 current_duplex = DUPLEX_FULL;
1161
1162         }
1163         lp->active_speed = current_speed;
1164         lp->active_duplex = current_duplex;
1165         err = 0;
1166
1167 out:
1168         spin_unlock_irqrestore(&np->lock, flags);
1169
1170         *link_up_p = link_up;
1171         return err;
1172 }
1173
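/* Reset the BCM8704/8706 PHY through its PHYXS device and poll BMCR until
 * the reset bit self-clears.
 */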
1174 static int bcm8704_reset(struct niu *np)
1175 {
1176         int err, limit;
1177
1178         err = mdio_read(np, np->phy_addr,
1179                         BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
1180         if (err < 0)
1181                 return err;
1182         err |= BMCR_RESET;
1183         err = mdio_write(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
1184                          MII_BMCR, err);
1185         if (err)
1186                 return err;
1187
1188         limit = 1000;
1189         while (--limit >= 0) {
1190                 err = mdio_read(np, np->phy_addr,
1191                                 BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
1192                 if (err < 0)
1193                         return err;
1194                 if (!(err & BMCR_RESET))
1195                         break;
1196         }
1197         if (limit < 0) {
1198                 dev_err(np->device, PFX "Port %u PHY will not reset "
1199                         "(bmcr=%04x)\n", np->port, (err & 0xffff));
1200                 return -ENODEV;
1201         }
1202         return 0;
1203 }
1204
1205 /* When written, certain PHY registers need to be read back twice
1206  * in order for the bits to settle properly.
1207  */
1208 static int bcm8704_user_dev3_readback(struct niu *np, int reg)
1209 {
1210         int err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
1211         if (err < 0)
1212                 return err;
1213         err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
1214         if (err < 0)
1215                 return err;
1216         return 0;
1217 }
1218
1219 static int bcm8706_init_user_dev3(struct niu *np)
1220 {
1221         int err;
1222
1223
1224         err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
1225                         BCM8704_USER_OPT_DIGITAL_CTRL);
1226         if (err < 0)
1227                 return err;
1228         err &= ~USER_ODIG_CTRL_GPIOS;
1229         err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
1230         err |=  USER_ODIG_CTRL_RESV2;
1231         err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
1232                          BCM8704_USER_OPT_DIGITAL_CTRL, err);
1233         if (err)
1234                 return err;
1235
1236         mdelay(1000);
1237
1238         return 0;
1239 }
1240
1241 static int bcm8704_init_user_dev3(struct niu *np)
1242 {
1243         int err;
1244
1245         err = mdio_write(np, np->phy_addr,
1246                          BCM8704_USER_DEV3_ADDR, BCM8704_USER_CONTROL,
1247                          (USER_CONTROL_OPTXRST_LVL |
1248                           USER_CONTROL_OPBIASFLT_LVL |
1249                           USER_CONTROL_OBTMPFLT_LVL |
1250                           USER_CONTROL_OPPRFLT_LVL |
1251                           USER_CONTROL_OPTXFLT_LVL |
1252                           USER_CONTROL_OPRXLOS_LVL |
1253                           USER_CONTROL_OPRXFLT_LVL |
1254                           USER_CONTROL_OPTXON_LVL |
1255                           (0x3f << USER_CONTROL_RES1_SHIFT)));
1256         if (err)
1257                 return err;
1258
1259         err = mdio_write(np, np->phy_addr,
1260                          BCM8704_USER_DEV3_ADDR, BCM8704_USER_PMD_TX_CONTROL,
1261                          (USER_PMD_TX_CTL_XFP_CLKEN |
1262                           (1 << USER_PMD_TX_CTL_TX_DAC_TXD_SH) |
1263                           (2 << USER_PMD_TX_CTL_TX_DAC_TXCK_SH) |
1264                           USER_PMD_TX_CTL_TSCK_LPWREN));
1265         if (err)
1266                 return err;
1267
1268         err = bcm8704_user_dev3_readback(np, BCM8704_USER_CONTROL);
1269         if (err)
1270                 return err;
1271         err = bcm8704_user_dev3_readback(np, BCM8704_USER_PMD_TX_CONTROL);
1272         if (err)
1273                 return err;
1274
1275         err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
1276                         BCM8704_USER_OPT_DIGITAL_CTRL);
1277         if (err < 0)
1278                 return err;
1279         err &= ~USER_ODIG_CTRL_GPIOS;
1280         err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
1281         err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
1282                          BCM8704_USER_OPT_DIGITAL_CTRL, err);
1283         if (err)
1284                 return err;
1285
1286         mdelay(1000);
1287
1288         return 0;
1289 }
1290
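/* Marvell 88X2011 PHY helpers: program the activity LED function and the
 * LED blink rate through the user device 2 control registers.
 */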
1291 static int mrvl88x2011_act_led(struct niu *np, int val)
1292 {
1293         int     err;
1294
1295         err  = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
1296                 MRVL88X2011_LED_8_TO_11_CTL);
1297         if (err < 0)
1298                 return err;
1299
1300         err &= ~MRVL88X2011_LED(MRVL88X2011_LED_ACT, MRVL88X2011_LED_CTL_MASK);
1301         err |= MRVL88X2011_LED(MRVL88X2011_LED_ACT, val);
1302
1303         return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
1304                           MRVL88X2011_LED_8_TO_11_CTL, err);
1305 }
1306
1307 static int mrvl88x2011_led_blink_rate(struct niu *np, int rate)
1308 {
1309         int     err;
1310
1311         err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
1312                         MRVL88X2011_LED_BLINK_CTL);
1313         if (err >= 0) {
1314                 err &= ~MRVL88X2011_LED_BLKRATE_MASK;
1315                 err |= (rate << 4);
1316
1317                 err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
1318                                  MRVL88X2011_LED_BLINK_CTL, err);
1319         }
1320
1321         return err;
1322 }
1323
1324 static int xcvr_init_10g_mrvl88x2011(struct niu *np)
1325 {
1326         int     err;
1327
1328         /* Set LED functions */
1329         err = mrvl88x2011_led_blink_rate(np, MRVL88X2011_LED_BLKRATE_134MS);
1330         if (err)
1331                 return err;
1332
1333         /* led activity */
1334         err = mrvl88x2011_act_led(np, MRVL88X2011_LED_CTL_OFF);
1335         if (err)
1336                 return err;
1337
1338         err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
1339                         MRVL88X2011_GENERAL_CTL);
1340         if (err < 0)
1341                 return err;
1342
1343         err |= MRVL88X2011_ENA_XFPREFCLK;
1344
1345         err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
1346                          MRVL88X2011_GENERAL_CTL, err);
1347         if (err < 0)
1348                 return err;
1349
1350         err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
1351                         MRVL88X2011_PMA_PMD_CTL_1);
1352         if (err < 0)
1353                 return err;
1354
1355         if (np->link_config.loopback_mode == LOOPBACK_MAC)
1356                 err |= MRVL88X2011_LOOPBACK;
1357         else
1358                 err &= ~MRVL88X2011_LOOPBACK;
1359
1360         err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
1361                          MRVL88X2011_PMA_PMD_CTL_1, err);
1362         if (err < 0)
1363                 return err;
1364
1365         /* Enable PMD  */
1366         return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
1367                           MRVL88X2011_10G_PMD_TX_DIS, MRVL88X2011_ENA_PMDTX);
1368 }
1369
1370
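/* Read back a few BCM870x status registers and warn when the analog status
 * and TX alarm values indicate a disconnected cable or a bad/missing
 * optical module.
 */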
1371 static int xcvr_diag_bcm870x(struct niu *np)
1372 {
1373         u16 analog_stat0, tx_alarm_status;
1374         int err = 0;
1375
1376 #if 1
1377         err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
1378                         MII_STAT1000);
1379         if (err < 0)
1380                 return err;
1381         pr_info(PFX "Port %u PMA_PMD(MII_STAT1000) [%04x]\n",
1382                 np->port, err);
1383
1384         err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 0x20);
1385         if (err < 0)
1386                 return err;
1387         pr_info(PFX "Port %u USER_DEV3(0x20) [%04x]\n",
1388                 np->port, err);
1389
1390         err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
1391                         MII_NWAYTEST);
1392         if (err < 0)
1393                 return err;
1394         pr_info(PFX "Port %u PHYXS(MII_NWAYTEST) [%04x]\n",
1395                 np->port, err);
1396 #endif
1397
1398         /* XXX dig this out it might not be so useful XXX */
1399         err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
1400                         BCM8704_USER_ANALOG_STATUS0);
1401         if (err < 0)
1402                 return err;
1403         err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
1404                         BCM8704_USER_ANALOG_STATUS0);
1405         if (err < 0)
1406                 return err;
1407         analog_stat0 = err;
1408
1409         err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
1410                         BCM8704_USER_TX_ALARM_STATUS);
1411         if (err < 0)
1412                 return err;
1413         err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
1414                         BCM8704_USER_TX_ALARM_STATUS);
1415         if (err < 0)
1416                 return err;
1417         tx_alarm_status = err;
1418
1419         if (analog_stat0 != 0x03fc) {
1420                 if ((analog_stat0 == 0x43bc) && (tx_alarm_status != 0)) {
1421                         pr_info(PFX "Port %u cable not connected "
1422                                 "or bad cable.\n", np->port);
1423                 } else if (analog_stat0 == 0x639c) {
1424                         pr_info(PFX "Port %u optical module is bad "
1425                                 "or missing.\n", np->port);
1426                 }
1427         }
1428
1429         return 0;
1430 }
1431
1432 static int xcvr_10g_set_lb_bcm870x(struct niu *np)
1433 {
1434         struct niu_link_config *lp = &np->link_config;
1435         int err;
1436
1437         err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
1438                         MII_BMCR);
1439         if (err < 0)
1440                 return err;
1441
1442         err &= ~BMCR_LOOPBACK;
1443
1444         if (lp->loopback_mode == LOOPBACK_MAC)
1445                 err |= BMCR_LOOPBACK;
1446
1447         err = mdio_write(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
1448                          MII_BMCR, err);
1449         if (err)
1450                 return err;
1451
1452         return 0;
1453 }
1454
1455 static int xcvr_init_10g_bcm8706(struct niu *np)
1456 {
1457         int err = 0;
1458         u64 val;
1459
1460         if ((np->flags & NIU_FLAGS_HOTPLUG_PHY) &&
1461             (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) == 0)
1462                 return err;
1463
1464         val = nr64_mac(XMAC_CONFIG);
1465         val &= ~XMAC_CONFIG_LED_POLARITY;
1466         val |= XMAC_CONFIG_FORCE_LED_ON;
1467         nw64_mac(XMAC_CONFIG, val);
1468
1469         val = nr64(MIF_CONFIG);
1470         val |= MIF_CONFIG_INDIRECT_MODE;
1471         nw64(MIF_CONFIG, val);
1472
1473         err = bcm8704_reset(np);
1474         if (err)
1475                 return err;
1476
1477         err = xcvr_10g_set_lb_bcm870x(np);
1478         if (err)
1479                 return err;
1480
1481         err = bcm8706_init_user_dev3(np);
1482         if (err)
1483                 return err;
1484
1485         err = xcvr_diag_bcm870x(np);
1486         if (err)
1487                 return err;
1488
1489         return 0;
1490 }
1491
1492 static int xcvr_init_10g_bcm8704(struct niu *np)
1493 {
1494         int err;
1495
1496         err = bcm8704_reset(np);
1497         if (err)
1498                 return err;
1499
1500         err = bcm8704_init_user_dev3(np);
1501         if (err)
1502                 return err;
1503
1504         err = xcvr_10g_set_lb_bcm870x(np);
1505         if (err)
1506                 return err;
1507
1508         err =  xcvr_diag_bcm870x(np);
1509         if (err)
1510                 return err;
1511
1512         return 0;
1513 }
1514
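/* Pick the 10G transceiver init routine from the PHY ID recorded during
 * probing; anything that is not a Marvell 88X2011 is treated as a
 * Broadcom 8704.
 */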
1515 static int xcvr_init_10g(struct niu *np)
1516 {
1517         int phy_id, err;
1518         u64 val;
1519
1520         val = nr64_mac(XMAC_CONFIG);
1521         val &= ~XMAC_CONFIG_LED_POLARITY;
1522         val |= XMAC_CONFIG_FORCE_LED_ON;
1523         nw64_mac(XMAC_CONFIG, val);
1524
1525         /* XXX shared resource, lock parent XXX */
1526         val = nr64(MIF_CONFIG);
1527         val |= MIF_CONFIG_INDIRECT_MODE;
1528         nw64(MIF_CONFIG, val);
1529
1530         phy_id = phy_decode(np->parent->port_phy, np->port);
1531         phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];
1532
1533         /* handle different phy types */
1534         switch (phy_id & NIU_PHY_ID_MASK) {
1535         case NIU_PHY_ID_MRVL88X2011:
1536                 err = xcvr_init_10g_mrvl88x2011(np);
1537                 break;
1538
1539         default: /* bcom 8704 */
1540                 err = xcvr_init_10g_bcm8704(np);
1541                 break;
1542         }
1543
1544         return 0;
1545 }
1546
1547 static int mii_reset(struct niu *np)
1548 {
1549         int limit, err;
1550
1551         err = mii_write(np, np->phy_addr, MII_BMCR, BMCR_RESET);
1552         if (err)
1553                 return err;
1554
1555         limit = 1000;
1556         while (--limit >= 0) {
1557                 udelay(500);
1558                 err = mii_read(np, np->phy_addr, MII_BMCR);
1559                 if (err < 0)
1560                         return err;
1561                 if (!(err & BMCR_RESET))
1562                         break;
1563         }
1564         if (limit < 0) {
1565                 dev_err(np->device, PFX "Port %u MII would not reset, "
1566                         "bmcr[%04x]\n", np->port, err);
1567                 return -ENODEV;
1568         }
1569
1570         return 0;
1571 }
1572
1573 static int xcvr_init_1g_rgmii(struct niu *np)
1574 {
1575         int err;
1576         u64 val;
1577         u16 bmcr, bmsr, estat;
1578
1579         val = nr64(MIF_CONFIG);
1580         val &= ~MIF_CONFIG_INDIRECT_MODE;
1581         nw64(MIF_CONFIG, val);
1582
1583         err = mii_reset(np);
1584         if (err)
1585                 return err;
1586
1587         err = mii_read(np, np->phy_addr, MII_BMSR);
1588         if (err < 0)
1589                 return err;
1590         bmsr = err;
1591
1592         estat = 0;
1593         if (bmsr & BMSR_ESTATEN) {
1594                 err = mii_read(np, np->phy_addr, MII_ESTATUS);
1595                 if (err < 0)
1596                         return err;
1597                 estat = err;
1598         }
1599
1600         bmcr = 0;
1601         err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
1602         if (err)
1603                 return err;
1604
1605         if (bmsr & BMSR_ESTATEN) {
1606                 u16 ctrl1000 = 0;
1607
1608                 if (estat & ESTATUS_1000_TFULL)
1609                         ctrl1000 |= ADVERTISE_1000FULL;
1610                 err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000);
1611                 if (err)
1612                         return err;
1613         }
1614
1615         bmcr = (BMCR_SPEED1000 | BMCR_FULLDPLX);
1616
1617         err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
1618         if (err)
1619                 return err;
1620
1621         err = mii_read(np, np->phy_addr, MII_BMCR);
1622         if (err < 0)
1623                 return err;
1624         bmcr = mii_read(np, np->phy_addr, MII_BMCR);
1625
1626         err = mii_read(np, np->phy_addr, MII_BMSR);
1627         if (err < 0)
1628                 return err;
1629
1630         return 0;
1631 }
1632
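/* Common 1G copper PHY bring-up: reset the PHY, set up MAC loopback or
 * external (BCM5464R) loopback when requested, advertise the full-duplex
 * modes the PHY supports and restart autonegotiation.
 */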
1633 static int mii_init_common(struct niu *np)
1634 {
1635         struct niu_link_config *lp = &np->link_config;
1636         u16 bmcr, bmsr, adv, estat;
1637         int err;
1638
1639         err = mii_reset(np);
1640         if (err)
1641                 return err;
1642
1643         err = mii_read(np, np->phy_addr, MII_BMSR);
1644         if (err < 0)
1645                 return err;
1646         bmsr = err;
1647
1648         estat = 0;
1649         if (bmsr & BMSR_ESTATEN) {
1650                 err = mii_read(np, np->phy_addr, MII_ESTATUS);
1651                 if (err < 0)
1652                         return err;
1653                 estat = err;
1654         }
1655
1656         bmcr = 0;
1657         err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
1658         if (err)
1659                 return err;
1660
1661         if (lp->loopback_mode == LOOPBACK_MAC) {
1662                 bmcr |= BMCR_LOOPBACK;
1663                 if (lp->active_speed == SPEED_1000)
1664                         bmcr |= BMCR_SPEED1000;
1665                 if (lp->active_duplex == DUPLEX_FULL)
1666                         bmcr |= BMCR_FULLDPLX;
1667         }
1668
1669         if (lp->loopback_mode == LOOPBACK_PHY) {
1670                 u16 aux;
1671
1672                 aux = (BCM5464R_AUX_CTL_EXT_LB |
1673                        BCM5464R_AUX_CTL_WRITE_1);
1674                 err = mii_write(np, np->phy_addr, BCM5464R_AUX_CTL, aux);
1675                 if (err)
1676                         return err;
1677         }
1678
1679         /* XXX configurable XXX */
1680         /* XXX for now don't advertise half-duplex or asym pause... XXX */
1681         adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1682         if (bmsr & BMSR_10FULL)
1683                 adv |= ADVERTISE_10FULL;
1684         if (bmsr & BMSR_100FULL)
1685                 adv |= ADVERTISE_100FULL;
1686         err = mii_write(np, np->phy_addr, MII_ADVERTISE, adv);
1687         if (err)
1688                 return err;
1689
1690         if (bmsr & BMSR_ESTATEN) {
1691                 u16 ctrl1000 = 0;
1692
1693                 if (estat & ESTATUS_1000_TFULL)
1694                         ctrl1000 |= ADVERTISE_1000FULL;
1695                 err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000);
1696                 if (err)
1697                         return err;
1698         }
1699         bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
1700
1701         err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
1702         if (err)
1703                 return err;
1704
1705         err = mii_read(np, np->phy_addr, MII_BMCR);
1706         if (err < 0)
1707                 return err;
1708         err = mii_read(np, np->phy_addr, MII_BMSR);
1709         if (err < 0)
1710                 return err;
1711 #if 0
1712         pr_info(PFX "Port %u after MII init bmcr[%04x] bmsr[%04x]\n",
1713                 np->port, bmcr, bmsr);
1714 #endif
1715
1716         return 0;
1717 }
1718
1719 static int xcvr_init_1g(struct niu *np)
1720 {
1721         u64 val;
1722
1723         /* XXX shared resource, lock parent XXX */
1724         val = nr64(MIF_CONFIG);
1725         val &= ~MIF_CONFIG_INDIRECT_MODE;
1726         nw64(MIF_CONFIG, val);
1727
1728         return mii_init_common(np);
1729 }
1730
1731 static int niu_xcvr_init(struct niu *np)
1732 {
1733         const struct niu_phy_ops *ops = np->phy_ops;
1734         int err;
1735
1736         err = 0;
1737         if (ops->xcvr_init)
1738                 err = ops->xcvr_init(np);
1739
1740         return err;
1741 }
1742
1743 static int niu_serdes_init(struct niu *np)
1744 {
1745         const struct niu_phy_ops *ops = np->phy_ops;
1746         int err;
1747
1748         err = 0;
1749         if (ops->serdes_init)
1750                 err = ops->serdes_init(np);
1751
1752         return err;
1753 }
1754
1755 static void niu_init_xif(struct niu *);
1756 static void niu_handle_led(struct niu *, int status);
1757
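/* Fold a polled link state into the net stack: on a down->up transition,
 * log the negotiated speed/duplex, reprogram the XIF and LED under np->lock
 * and turn the carrier on; on up->down, clear the LED and the carrier.
 */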
1758 static int niu_link_status_common(struct niu *np, int link_up)
1759 {
1760         struct niu_link_config *lp = &np->link_config;
1761         struct net_device *dev = np->dev;
1762         unsigned long flags;
1763
1764         if (!netif_carrier_ok(dev) && link_up) {
1765                 niuinfo(LINK, "%s: Link is up at %s, %s duplex\n",
1766                        dev->name,
1767                        (lp->active_speed == SPEED_10000 ?
1768                         "10Gb/sec" :
1769                         (lp->active_speed == SPEED_1000 ?
1770                          "1Gb/sec" :
1771                          (lp->active_speed == SPEED_100 ?
1772                           "100Mbit/sec" : "10Mbit/sec"))),
1773                        (lp->active_duplex == DUPLEX_FULL ?
1774                         "full" : "half"));
1775
1776                 spin_lock_irqsave(&np->lock, flags);
1777                 niu_init_xif(np);
1778                 niu_handle_led(np, 1);
1779                 spin_unlock_irqrestore(&np->lock, flags);
1780
1781                 netif_carrier_on(dev);
1782         } else if (netif_carrier_ok(dev) && !link_up) {
1783                 niuwarn(LINK, "%s: Link is down\n", dev->name);
1784                 spin_lock_irqsave(&np->lock, flags);
1785                 niu_handle_led(np, 0);
1786                 spin_unlock_irqrestore(&np->lock, flags);
1787                 netif_carrier_off(dev);
1788         }
1789
1790         return 0;
1791 }
1792
1793 static int link_status_10g_mrvl(struct niu *np, int *link_up_p)
1794 {
1795         int err, link_up, pma_status, pcs_status;
1796
1797         link_up = 0;
1798
1799         err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
1800                         MRVL88X2011_10G_PMD_STATUS_2);
1801         if (err < 0)
1802                 goto out;
1803
1804         /* Check PMA/PMD Register: 1.0001.2 == 1 */
1805         err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
1806                         MRVL88X2011_PMA_PMD_STATUS_1);
1807         if (err < 0)
1808                 goto out;
1809
1810         pma_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);
1811
1812         /* Check PMC Register : 3.0001.2 == 1: read twice */
1813         err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
1814                         MRVL88X2011_PMA_PMD_STATUS_1);
1815         if (err < 0)
1816                 goto out;
1817
1818         err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
1819                         MRVL88X2011_PMA_PMD_STATUS_1);
1820         if (err < 0)
1821                 goto out;
1822
1823         pcs_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);
1824
1825         /* Check XGXS Register : 4.0018.[0-3,12] */
1826         err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV4_ADDR,
1827                         MRVL88X2011_10G_XGXS_LANE_STAT);
1828         if (err < 0)
1829                 goto out;
1830
1831         if (err == (PHYXS_XGXS_LANE_STAT_ALINGED | PHYXS_XGXS_LANE_STAT_LANE3 |
1832                     PHYXS_XGXS_LANE_STAT_LANE2 | PHYXS_XGXS_LANE_STAT_LANE1 |
1833                     PHYXS_XGXS_LANE_STAT_LANE0 | PHYXS_XGXS_LANE_STAT_MAGIC |
1834                     0x800))
1835                 link_up = (pma_status && pcs_status) ? 1 : 0;
1836
1837         np->link_config.active_speed = SPEED_10000;
1838         np->link_config.active_duplex = DUPLEX_FULL;
1839         err = 0;
1840 out:
1841         mrvl88x2011_act_led(np, (link_up ?
1842                                  MRVL88X2011_LED_CTL_PCS_ACT :
1843                                  MRVL88X2011_LED_CTL_OFF));
1844
1845         *link_up_p = link_up;
1846         return err;
1847 }
1848
1849 static int link_status_10g_bcm8706(struct niu *np, int *link_up_p)
1850 {
1851         int err, link_up;
1852         link_up = 0;
1853
1854         err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
1855                         BCM8704_PMD_RCV_SIGDET);
1856         if (err < 0)
1857                 goto out;
1858         if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
1859                 err = 0;
1860                 goto out;
1861         }
1862
1863         err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
1864                         BCM8704_PCS_10G_R_STATUS);
1865         if (err < 0)
1866                 goto out;
1867
1868         if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
1869                 err = 0;
1870                 goto out;
1871         }
1872
1873         err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
1874                         BCM8704_PHYXS_XGXS_LANE_STAT);
1875         if (err < 0)
1876                 goto out;
1877         if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
1878                     PHYXS_XGXS_LANE_STAT_MAGIC |
1879                     PHYXS_XGXS_LANE_STAT_PATTEST |
1880                     PHYXS_XGXS_LANE_STAT_LANE3 |
1881                     PHYXS_XGXS_LANE_STAT_LANE2 |
1882                     PHYXS_XGXS_LANE_STAT_LANE1 |
1883                     PHYXS_XGXS_LANE_STAT_LANE0)) {
1884                 err = 0;
1885                 np->link_config.active_speed = SPEED_INVALID;
1886                 np->link_config.active_duplex = DUPLEX_INVALID;
1887                 goto out;
1888         }
1889
1890         link_up = 1;
1891         np->link_config.active_speed = SPEED_10000;
1892         np->link_config.active_duplex = DUPLEX_FULL;
1893         err = 0;
1894
1895 out:
1896         *link_up_p = link_up;
1897         if (np->flags & NIU_FLAGS_HOTPLUG_PHY)
1898                 err = 0;
1899         return err;
1900 }
1901
1902 static int link_status_10g_bcom(struct niu *np, int *link_up_p)
1903 {
1904         int err, link_up;
1905
1906         link_up = 0;
1907
1908         err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
1909                         BCM8704_PMD_RCV_SIGDET);
1910         if (err < 0)
1911                 goto out;
1912         if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
1913                 err = 0;
1914                 goto out;
1915         }
1916
1917         err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
1918                         BCM8704_PCS_10G_R_STATUS);
1919         if (err < 0)
1920                 goto out;
1921         if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
1922                 err = 0;
1923                 goto out;
1924         }
1925
1926         err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
1927                         BCM8704_PHYXS_XGXS_LANE_STAT);
1928         if (err < 0)
1929                 goto out;
1930
1931         if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
1932                     PHYXS_XGXS_LANE_STAT_MAGIC |
1933                     PHYXS_XGXS_LANE_STAT_LANE3 |
1934                     PHYXS_XGXS_LANE_STAT_LANE2 |
1935                     PHYXS_XGXS_LANE_STAT_LANE1 |
1936                     PHYXS_XGXS_LANE_STAT_LANE0)) {
1937                 err = 0;
1938                 goto out;
1939         }
1940
1941         link_up = 1;
1942         np->link_config.active_speed = SPEED_10000;
1943         np->link_config.active_duplex = DUPLEX_FULL;
1944         err = 0;
1945
1946 out:
1947         *link_up_p = link_up;
1948         return err;
1949 }
1950
1951 static int link_status_10g(struct niu *np, int *link_up_p)
1952 {
1953         unsigned long flags;
1954         int err = -EINVAL;
1955
1956         spin_lock_irqsave(&np->lock, flags);
1957
1958         if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
1959                 int phy_id;
1960
1961                 phy_id = phy_decode(np->parent->port_phy, np->port);
1962                 phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];
1963
1964                 /* handle different phy types */
1965                 switch (phy_id & NIU_PHY_ID_MASK) {
1966                 case NIU_PHY_ID_MRVL88X2011:
1967                         err = link_status_10g_mrvl(np, link_up_p);
1968                         break;
1969
1970                 default: /* bcom 8704 */
1971                         err = link_status_10g_bcom(np, link_up_p);
1972                         break;
1973                 }
1974         }
1975
1976         spin_unlock_irqrestore(&np->lock, flags);
1977
1978         return err;
1979 }
1980
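/* Return 1 if the ESR interrupt signal bits for this port (0 or 1 only)
 * show the 10G SERDES/PHY lanes as ready, 0 otherwise.
 */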
1981 static int niu_10g_phy_present(struct niu *np)
1982 {
1983         u64 sig, mask, val;
1984
1985         sig = nr64(ESR_INT_SIGNALS);
1986         switch (np->port) {
1987         case 0:
1988                 mask = ESR_INT_SIGNALS_P0_BITS;
1989                 val = (ESR_INT_SRDY0_P0 |
1990                        ESR_INT_DET0_P0 |
1991                        ESR_INT_XSRDY_P0 |
1992                        ESR_INT_XDP_P0_CH3 |
1993                        ESR_INT_XDP_P0_CH2 |
1994                        ESR_INT_XDP_P0_CH1 |
1995                        ESR_INT_XDP_P0_CH0);
1996                 break;
1997
1998         case 1:
1999                 mask = ESR_INT_SIGNALS_P1_BITS;
2000                 val = (ESR_INT_SRDY0_P1 |
2001                        ESR_INT_DET0_P1 |
2002                        ESR_INT_XSRDY_P1 |
2003                        ESR_INT_XDP_P1_CH3 |
2004                        ESR_INT_XDP_P1_CH2 |
2005                        ESR_INT_XDP_P1_CH1 |
2006                        ESR_INT_XDP_P1_CH0);
2007                 break;
2008
2009         default:
2010                 return 0;
2011         }
2012
2013         if ((sig & mask) != val)
2014                 return 0;
2015         return 1;
2016 }
2017
2018 static int link_status_10g_hotplug(struct niu *np, int *link_up_p)
2019 {
2020         unsigned long flags;
2021         int err = 0;
2022         int phy_present;
2023         int phy_present_prev;
2024
2025         spin_lock_irqsave(&np->lock, flags);
2026
2027         if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
2028                 phy_present_prev = (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) ?
2029                         1 : 0;
2030                 phy_present = niu_10g_phy_present(np);
2031                 if (phy_present != phy_present_prev) {
2032                         /* state change */
2033                         if (phy_present) {
2034                                 np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
2035                                 if (np->phy_ops->xcvr_init)
2036                                         err = np->phy_ops->xcvr_init(np);
2037                                 if (err) {
2038                                         /* debounce */
2039                                         np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
2040                                 }
2041                         } else {
2042                                 np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
2043                                 *link_up_p = 0;
2044                                 niuwarn(LINK, "%s: Hotplug PHY Removed\n",
2045                                         np->dev->name);
2046                         }
2047                 }
2048                 if (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT)
2049                         err = link_status_10g_bcm8706(np, link_up_p);
2050         }
2051
2052         spin_unlock_irqrestore(&np->lock, flags);
2053
2054         return err;
2055 }
2056
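/* 1G link status: read BMSR and, if the link is up, derive the current
 * speed/duplex from the PHY extended status and the ADVERTISE/LPA
 * intersection.
 */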
2057 static int link_status_1g(struct niu *np, int *link_up_p)
2058 {
2059         struct niu_link_config *lp = &np->link_config;
2060         u16 current_speed, bmsr;
2061         unsigned long flags;
2062         u8 current_duplex;
2063         int err, link_up;
2064
2065         link_up = 0;
2066         current_speed = SPEED_INVALID;
2067         current_duplex = DUPLEX_INVALID;
2068
2069         spin_lock_irqsave(&np->lock, flags);
2070
2071         err = -EINVAL;
2072         if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
2073                 goto out;
2074
2075         err = mii_read(np, np->phy_addr, MII_BMSR);
2076         if (err < 0)
2077                 goto out;
2078
2079         bmsr = err;
2080         if (bmsr & BMSR_LSTATUS) {
2081                 u16 adv, lpa, common, estat;
2082
2083                 err = mii_read(np, np->phy_addr, MII_ADVERTISE);
2084                 if (err < 0)
2085                         goto out;
2086                 adv = err;
2087
2088                 err = mii_read(np, np->phy_addr, MII_LPA);
2089                 if (err < 0)
2090                         goto out;
2091                 lpa = err;
2092
2093                 common = adv & lpa;
2094
2095                 err = mii_read(np, np->phy_addr, MII_ESTATUS);
2096                 if (err < 0)
2097                         goto out;
2098                 estat = err;
2099
2100                 link_up = 1;
2101                 if (estat & (ESTATUS_1000_TFULL | ESTATUS_1000_THALF)) {
2102                         current_speed = SPEED_1000;
2103                         if (estat & ESTATUS_1000_TFULL)
2104                                 current_duplex = DUPLEX_FULL;
2105                         else
2106                                 current_duplex = DUPLEX_HALF;
2107                 } else {
2108                         if (common & ADVERTISE_100BASE4) {
2109                                 current_speed = SPEED_100;
2110                                 current_duplex = DUPLEX_HALF;
2111                         } else if (common & ADVERTISE_100FULL) {
2112                                 current_speed = SPEED_100;
2113                                 current_duplex = DUPLEX_FULL;
2114                         } else if (common & ADVERTISE_100HALF) {
2115                                 current_speed = SPEED_100;
2116                                 current_duplex = DUPLEX_HALF;
2117                         } else if (common & ADVERTISE_10FULL) {
2118                                 current_speed = SPEED_10;
2119                                 current_duplex = DUPLEX_FULL;
2120                         } else if (common & ADVERTISE_10HALF) {
2121                                 current_speed = SPEED_10;
2122                                 current_duplex = DUPLEX_HALF;
2123                         } else
2124                                 link_up = 0;
2125                 }
2126         }
2127         lp->active_speed = current_speed;
2128         lp->active_duplex = current_duplex;
2129         err = 0;
2130
2131 out:
2132         spin_unlock_irqrestore(&np->lock, flags);
2133
2134         *link_up_p = link_up;
2135         return err;
2136 }
2137
2138 static int niu_link_status(struct niu *np, int *link_up_p)
2139 {
2140         const struct niu_phy_ops *ops = np->phy_ops;
2141         int err;
2142
2143         err = 0;
2144         if (ops->link_status)
2145                 err = ops->link_status(np, link_up_p);
2146
2147         return err;
2148 }
2149
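/* Periodic link poll: refresh the link status and re-arm the timer
 * (every 5 seconds while the carrier is up, every second otherwise).
 */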
2150 static void niu_timer(unsigned long __opaque)
2151 {
2152         struct niu *np = (struct niu *) __opaque;
2153         unsigned long off;
2154         int err, link_up;
2155
2156         err = niu_link_status(np, &link_up);
2157         if (!err)
2158                 niu_link_status_common(np, link_up);
2159
2160         if (netif_carrier_ok(np->dev))
2161                 off = 5 * HZ;
2162         else
2163                 off = 1 * HZ;
2164         np->timer.expires = jiffies + off;
2165
2166         add_timer(&np->timer);
2167 }
2168
2169 static const struct niu_phy_ops phy_ops_10g_serdes = {
2170         .serdes_init            = serdes_init_10g_serdes,
2171         .link_status            = link_status_10g_serdes,
2172 };
2173
2174 static const struct niu_phy_ops phy_ops_10g_serdes_niu = {
2175         .serdes_init            = serdes_init_niu_10g_serdes,
2176         .link_status            = link_status_10g_serdes,
2177 };
2178
2179 static const struct niu_phy_ops phy_ops_1g_serdes_niu = {
2180         .serdes_init            = serdes_init_niu_1g_serdes,
2181         .link_status            = link_status_1g_serdes,
2182 };
2183
2184 static const struct niu_phy_ops phy_ops_1g_rgmii = {
2185         .xcvr_init              = xcvr_init_1g_rgmii,
2186         .link_status            = link_status_1g_rgmii,
2187 };
2188
2189 static const struct niu_phy_ops phy_ops_10g_fiber_niu = {
2190         .serdes_init            = serdes_init_niu_10g_fiber,
2191         .xcvr_init              = xcvr_init_10g,
2192         .link_status            = link_status_10g,
2193 };
2194
2195 static const struct niu_phy_ops phy_ops_10g_fiber = {
2196         .serdes_init            = serdes_init_10g,
2197         .xcvr_init              = xcvr_init_10g,
2198         .link_status            = link_status_10g,
2199 };
2200
2201 static const struct niu_phy_ops phy_ops_10g_fiber_hotplug = {
2202         .serdes_init            = serdes_init_10g,
2203         .xcvr_init              = xcvr_init_10g_bcm8706,
2204         .link_status            = link_status_10g_hotplug,
2205 };
2206
2207 static const struct niu_phy_ops phy_ops_10g_copper = {
2208         .serdes_init            = serdes_init_10g,
2209         .link_status            = link_status_10g, /* XXX */
2210 };
2211
2212 static const struct niu_phy_ops phy_ops_1g_fiber = {
2213         .serdes_init            = serdes_init_1g,
2214         .xcvr_init              = xcvr_init_1g,
2215         .link_status            = link_status_1g,
2216 };
2217
2218 static const struct niu_phy_ops phy_ops_1g_copper = {
2219         .xcvr_init              = xcvr_init_1g,
2220         .link_status            = link_status_1g,
2221 };
2222
2223 struct niu_phy_template {
2224         const struct niu_phy_ops        *ops;
2225         u32                             phy_addr_base;
2226 };
2227
2228 static const struct niu_phy_template phy_template_niu_10g_fiber = {
2229         .ops            = &phy_ops_10g_fiber_niu,
2230         .phy_addr_base  = 16,
2231 };
2232
2233 static const struct niu_phy_template phy_template_niu_10g_serdes = {
2234         .ops            = &phy_ops_10g_serdes_niu,
2235         .phy_addr_base  = 0,
2236 };
2237
2238 static const struct niu_phy_template phy_template_niu_1g_serdes = {
2239         .ops            = &phy_ops_1g_serdes_niu,
2240         .phy_addr_base  = 0,
2241 };
2242
2243 static const struct niu_phy_template phy_template_10g_fiber = {
2244         .ops            = &phy_ops_10g_fiber,
2245         .phy_addr_base  = 8,
2246 };
2247
2248 static const struct niu_phy_template phy_template_10g_fiber_hotplug = {
2249         .ops            = &phy_ops_10g_fiber_hotplug,
2250         .phy_addr_base  = 8,
2251 };
2252
2253 static const struct niu_phy_template phy_template_10g_copper = {
2254         .ops            = &phy_ops_10g_copper,
2255         .phy_addr_base  = 10,
2256 };
2257
2258 static const struct niu_phy_template phy_template_1g_fiber = {
2259         .ops            = &phy_ops_1g_fiber,
2260         .phy_addr_base  = 0,
2261 };
2262
2263 static const struct niu_phy_template phy_template_1g_copper = {
2264         .ops            = &phy_ops_1g_copper,
2265         .phy_addr_base  = 0,
2266 };
2267
2268 static const struct niu_phy_template phy_template_1g_rgmii = {
2269         .ops            = &phy_ops_1g_rgmii,
2270         .phy_addr_base  = 0,
2271 };
2272
2273 static const struct niu_phy_template phy_template_10g_serdes = {
2274         .ops            = &phy_ops_10g_serdes,
2275         .phy_addr_base  = 0,
2276 };
2277
2278 static int niu_atca_port_num[4] = {
2279         0, 0,  11, 10
2280 };
2281
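/* Bring up the 10G SERDES on port 0 or 1: program the PLL, control and
 * test registers, configure rx/tx and glue controls on all four lanes,
 * then check the ESR signal bits; if they are not asserted, fall back to
 * 1G SERDES mode (clearing NIU_FLAGS_10G).
 */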
2282 static int serdes_init_10g_serdes(struct niu *np)
2283 {
2284         struct niu_link_config *lp = &np->link_config;
2285         unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
2286         u64 ctrl_val, test_cfg_val, sig, mask, val;
2287         int err;
2288         u64 reset_val;
2289
2290         switch (np->port) {
2291         case 0:
2292                 reset_val =  ENET_SERDES_RESET_0;
2293                 ctrl_reg = ENET_SERDES_0_CTRL_CFG;
2294                 test_cfg_reg = ENET_SERDES_0_TEST_CFG;
2295                 pll_cfg = ENET_SERDES_0_PLL_CFG;
2296                 break;
2297         case 1:
2298                 reset_val =  ENET_SERDES_RESET_1;
2299                 ctrl_reg = ENET_SERDES_1_CTRL_CFG;
2300                 test_cfg_reg = ENET_SERDES_1_TEST_CFG;
2301                 pll_cfg = ENET_SERDES_1_PLL_CFG;
2302                 break;
2303
2304         default:
2305                 return -EINVAL;
2306         }
2307         ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
2308                     ENET_SERDES_CTRL_SDET_1 |
2309                     ENET_SERDES_CTRL_SDET_2 |
2310                     ENET_SERDES_CTRL_SDET_3 |
2311                     (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
2312                     (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
2313                     (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
2314                     (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
2315                     (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
2316                     (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
2317                     (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
2318                     (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
2319         test_cfg_val = 0;
2320
2321         if (lp->loopback_mode == LOOPBACK_PHY) {
2322                 test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
2323                                   ENET_SERDES_TEST_MD_0_SHIFT) |
2324                                  (ENET_TEST_MD_PAD_LOOPBACK <<
2325                                   ENET_SERDES_TEST_MD_1_SHIFT) |
2326                                  (ENET_TEST_MD_PAD_LOOPBACK <<
2327                                   ENET_SERDES_TEST_MD_2_SHIFT) |
2328                                  (ENET_TEST_MD_PAD_LOOPBACK <<
2329                                   ENET_SERDES_TEST_MD_3_SHIFT));
2330         }
2331
2332         esr_reset(np);
2333         nw64(pll_cfg, ENET_SERDES_PLL_FBDIV2);
2334         nw64(ctrl_reg, ctrl_val);
2335         nw64(test_cfg_reg, test_cfg_val);
2336
2337         /* Initialize all 4 lanes of the SERDES.  */
2338         for (i = 0; i < 4; i++) {
2339                 u32 rxtx_ctrl, glue0;
2340
2341                 err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
2342                 if (err)
2343                         return err;
2344                 err = esr_read_glue0(np, i, &glue0);
2345                 if (err)
2346                         return err;
2347
2348                 rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
2349                 rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
2350                               (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));
2351
2352                 glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
2353                            ESR_GLUE_CTRL0_THCNT |
2354                            ESR_GLUE_CTRL0_BLTIME);
2355                 glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
2356                           (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
2357                           (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
2358                           (BLTIME_300_CYCLES <<
2359                            ESR_GLUE_CTRL0_BLTIME_SHIFT));
2360
2361                 err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
2362                 if (err)
2363                         return err;
2364                 err = esr_write_glue0(np, i, glue0);
2365                 if (err)
2366                         return err;
2367         }
2368
2369
2370         sig = nr64(ESR_INT_SIGNALS);
2371         switch (np->port) {
2372         case 0:
2373                 mask = ESR_INT_SIGNALS_P0_BITS;
2374                 val = (ESR_INT_SRDY0_P0 |
2375                        ESR_INT_DET0_P0 |
2376                        ESR_INT_XSRDY_P0 |
2377                        ESR_INT_XDP_P0_CH3 |
2378                        ESR_INT_XDP_P0_CH2 |
2379                        ESR_INT_XDP_P0_CH1 |
2380                        ESR_INT_XDP_P0_CH0);
2381                 break;
2382
2383         case 1:
2384                 mask = ESR_INT_SIGNALS_P1_BITS;
2385                 val = (ESR_INT_SRDY0_P1 |
2386                        ESR_INT_DET0_P1 |
2387                        ESR_INT_XSRDY_P1 |
2388                        ESR_INT_XDP_P1_CH3 |
2389                        ESR_INT_XDP_P1_CH2 |
2390                        ESR_INT_XDP_P1_CH1 |
2391                        ESR_INT_XDP_P1_CH0);
2392                 break;
2393
2394         default:
2395                 return -EINVAL;
2396         }
2397
2398         if ((sig & mask) != val) {
2399                 int err;
2400                 err = serdes_init_1g_serdes(np);
2401                 if (!err) {
2402                         np->flags &= ~NIU_FLAGS_10G;
2403                         np->mac_xcvr = MAC_XCVR_PCS;
2404                 } else {
2405                         dev_err(np->device, PFX "Port %u 10G/1G SERDES Link Failed\n",
2406                                 np->port);
2407                         return -ENODEV;
2408                 }
2409         }
2410
2411         return 0;
2412 }
2413
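/* Select the phy_ops template and MDIO address for this port based on the
 * platform type and the 10G/FIBER/XCVR_SERDES flag combination.
 */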
2414 static int niu_determine_phy_disposition(struct niu *np)
2415 {
2416         struct niu_parent *parent = np->parent;
2417         u8 plat_type = parent->plat_type;
2418         const struct niu_phy_template *tp;
2419         u32 phy_addr_off = 0;
2420
2421         if (plat_type == PLAT_TYPE_NIU) {
2422                 switch (np->flags &
2423                         (NIU_FLAGS_10G |
2424                          NIU_FLAGS_FIBER |
2425                          NIU_FLAGS_XCVR_SERDES)) {
2426                 case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
2427                         /* 10G Serdes */
2428                         tp = &phy_template_niu_10g_serdes;
2429                         break;
2430                 case NIU_FLAGS_XCVR_SERDES:
2431                         /* 1G Serdes */
2432                         tp = &phy_template_niu_1g_serdes;
2433                         break;
2434                 case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
2435                         /* 10G Fiber */
2436                 default:
2437                         tp = &phy_template_niu_10g_fiber;
2438                         phy_addr_off += np->port;
2439                         break;
2440                 }
2441         } else {
2442                 switch (np->flags &
2443                         (NIU_FLAGS_10G |
2444                          NIU_FLAGS_FIBER |
2445                          NIU_FLAGS_XCVR_SERDES)) {
2446                 case 0:
2447                         /* 1G copper */
2448                         tp = &phy_template_1g_copper;
2449                         if (plat_type == PLAT_TYPE_VF_P0)
2450                                 phy_addr_off = 10;
2451                         else if (plat_type == PLAT_TYPE_VF_P1)
2452                                 phy_addr_off = 26;
2453
2454                         phy_addr_off += (np->port ^ 0x3);
2455                         break;
2456
2457                 case NIU_FLAGS_10G:
2458                         /* 10G copper */
2459                         tp = &phy_template_10g_copper;
2460                         break;
2461
2462                 case NIU_FLAGS_FIBER:
2463                         /* 1G fiber */
2464                         tp = &phy_template_1g_fiber;
2465                         break;
2466
2467                 case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
2468                         /* 10G fiber */
2469                         tp = &phy_template_10g_fiber;
2470                         if (plat_type == PLAT_TYPE_VF_P0 ||
2471                             plat_type == PLAT_TYPE_VF_P1)
2472                                 phy_addr_off = 8;
2473                         phy_addr_off += np->port;
2474                         if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
2475                                 tp = &phy_template_10g_fiber_hotplug;
2476                                 if (np->port == 0)
2477                                         phy_addr_off = 8;
2478                                 if (np->port == 1)
2479                                         phy_addr_off = 12;
2480                         }
2481                         break;
2482
2483                 case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
2484                 case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
2485                 case NIU_FLAGS_XCVR_SERDES:
2486                         switch(np->port) {
2487                         case 0:
2488                         case 1:
2489                                 tp = &phy_template_10g_serdes;
2490                                 break;
2491                         case 2:
2492                         case 3:
2493                                 tp = &phy_template_1g_rgmii;
2494                                 break;
2495                         default:
2496                                 return -EINVAL;
2497                                 break;
2498                         }
2499                         phy_addr_off = niu_atca_port_num[np->port];
2500                         break;
2501
2502                 default:
2503                         return -EINVAL;
2504                 }
2505         }
2506
2507         np->phy_ops = tp->ops;
2508         np->phy_addr = tp->phy_addr_base + phy_addr_off;
2509
2510         return 0;
2511 }
2512
2513 static int niu_init_link(struct niu *np)
2514 {
2515         struct niu_parent *parent = np->parent;
2516         int err, ignore;
2517
2518         if (parent->plat_type == PLAT_TYPE_NIU) {
2519                 err = niu_xcvr_init(np);
2520                 if (err)
2521                         return err;
2522                 msleep(200);
2523         }
2524         err = niu_serdes_init(np);
2525         if (err)
2526                 return err;
2527         msleep(200);
2528         err = niu_xcvr_init(np);
2529         if (!err)
2530                 niu_link_status(np, &ignore);
2531         return 0;
2532 }
2533
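/* Load the station MAC address into the XMAC or BMAC address registers,
 * 16 bits per register (ADDR0 holds the two low-order octets).
 */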
2534 static void niu_set_primary_mac(struct niu *np, unsigned char *addr)
2535 {
2536         u16 reg0 = addr[4] << 8 | addr[5];
2537         u16 reg1 = addr[2] << 8 | addr[3];
2538         u16 reg2 = addr[0] << 8 | addr[1];
2539
2540         if (np->flags & NIU_FLAGS_XMAC) {
2541                 nw64_mac(XMAC_ADDR0, reg0);
2542                 nw64_mac(XMAC_ADDR1, reg1);
2543                 nw64_mac(XMAC_ADDR2, reg2);
2544         } else {
2545                 nw64_mac(BMAC_ADDR0, reg0);
2546                 nw64_mac(BMAC_ADDR1, reg1);
2547                 nw64_mac(BMAC_ADDR2, reg2);
2548         }
2549 }
2550
2551 static int niu_num_alt_addr(struct niu *np)
2552 {
2553         if (np->flags & NIU_FLAGS_XMAC)
2554                 return XMAC_NUM_ALT_ADDR;
2555         else
2556                 return BMAC_NUM_ALT_ADDR;
2557 }
2558
2559 static int niu_set_alt_mac(struct niu *np, int index, unsigned char *addr)
2560 {
2561         u16 reg0 = addr[4] << 8 | addr[5];
2562         u16 reg1 = addr[2] << 8 | addr[3];
2563         u16 reg2 = addr[0] << 8 | addr[1];
2564
2565         if (index >= niu_num_alt_addr(np))
2566                 return -EINVAL;
2567
2568         if (np->flags & NIU_FLAGS_XMAC) {
2569                 nw64_mac(XMAC_ALT_ADDR0(index), reg0);
2570                 nw64_mac(XMAC_ALT_ADDR1(index), reg1);
2571                 nw64_mac(XMAC_ALT_ADDR2(index), reg2);
2572         } else {
2573                 nw64_mac(BMAC_ALT_ADDR0(index), reg0);
2574                 nw64_mac(BMAC_ALT_ADDR1(index), reg1);
2575                 nw64_mac(BMAC_ALT_ADDR2(index), reg2);
2576         }
2577
2578         return 0;
2579 }
2580
2581 static int niu_enable_alt_mac(struct niu *np, int index, int on)
2582 {
2583         unsigned long reg;
2584         u64 val, mask;
2585
2586         if (index >= niu_num_alt_addr(np))
2587                 return -EINVAL;
2588
2589         if (np->flags & NIU_FLAGS_XMAC) {
2590                 reg = XMAC_ADDR_CMPEN;
2591                 mask = 1 << index;
2592         } else {
2593                 reg = BMAC_ADDR_CMPEN;
2594                 mask = 1 << (index + 1);
2595         }
2596
2597         val = nr64_mac(reg);
2598         if (on)
2599                 val |= mask;
2600         else
2601                 val &= ~mask;
2602         nw64_mac(reg, val);
2603
2604         return 0;
2605 }
2606
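/* Program a MAC HOST_INFO register with a receive DMA channel (RDC) table
 * number, optionally setting the MAC-preference bit.
 */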
2607 static void __set_rdc_table_num_hw(struct niu *np, unsigned long reg,
2608                                    int num, int mac_pref)
2609 {
2610         u64 val = nr64_mac(reg);
2611         val &= ~(HOST_INFO_MACRDCTBLN | HOST_INFO_MPR);
2612         val |= num;
2613         if (mac_pref)
2614                 val |= HOST_INFO_MPR;
2615         nw64_mac(reg, val);
2616 }
2617
2618 static int __set_rdc_table_num(struct niu *np,
2619                                int xmac_index, int bmac_index,
2620                                int rdc_table_num, int mac_pref)
2621 {
2622         unsigned long reg;
2623
2624         if (rdc_table_num & ~HOST_INFO_MACRDCTBLN)
2625                 return -EINVAL;
2626         if (np->flags & NIU_FLAGS_XMAC)
2627                 reg = XMAC_HOST_INFO(xmac_index);
2628         else
2629                 reg = BMAC_HOST_INFO(bmac_index);
2630         __set_rdc_table_num_hw(np, reg, rdc_table_num, mac_pref);
2631         return 0;
2632 }
2633
2634 static int niu_set_primary_mac_rdc_table(struct niu *np, int table_num,
2635                                          int mac_pref)
2636 {
2637         return __set_rdc_table_num(np, 17, 0, table_num, mac_pref);
2638 }
2639
2640 static int niu_set_multicast_mac_rdc_table(struct niu *np, int table_num,
2641                                            int mac_pref)
2642 {
2643         return __set_rdc_table_num(np, 16, 8, table_num, mac_pref);
2644 }
2645
2646 static int niu_set_alt_mac_rdc_table(struct niu *np, int idx,
2647                                      int table_num, int mac_pref)
2648 {
2649         if (idx >= niu_num_alt_addr(np))
2650                 return -EINVAL;
2651         return __set_rdc_table_num(np, idx, idx + 1, table_num, mac_pref);
2652 }
2653
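/* Recompute the VLAN table entry parity bits: PARITY0 covers the port 0/1
 * byte and PARITY1 the port 2/3 byte, keeping each half at even parity.
 */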
2654 static u64 vlan_entry_set_parity(u64 reg_val)
2655 {
2656         u64 port01_mask;
2657         u64 port23_mask;
2658
2659         port01_mask = 0x00ff;
2660         port23_mask = 0xff00;
2661
2662         if (hweight64(reg_val & port01_mask) & 1)
2663                 reg_val |= ENET_VLAN_TBL_PARITY0;
2664         else
2665                 reg_val &= ~ENET_VLAN_TBL_PARITY0;
2666
2667         if (hweight64(reg_val & port23_mask) & 1)
2668                 reg_val |= ENET_VLAN_TBL_PARITY1;
2669         else
2670                 reg_val &= ~ENET_VLAN_TBL_PARITY1;
2671
2672         return reg_val;
2673 }
2674
2675 static void vlan_tbl_write(struct niu *np, unsigned long index,
2676                            int port, int vpr, int rdc_table)
2677 {
2678         u64 reg_val = nr64(ENET_VLAN_TBL(index));
2679
2680         reg_val &= ~((ENET_VLAN_TBL_VPR |
2681                       ENET_VLAN_TBL_VLANRDCTBLN) <<
2682                      ENET_VLAN_TBL_SHIFT(port));
2683         if (vpr)
2684                 reg_val |= (ENET_VLAN_TBL_VPR <<
2685                             ENET_VLAN_TBL_SHIFT(port));
2686         reg_val |= (rdc_table << ENET_VLAN_TBL_SHIFT(port));
2687
2688         reg_val = vlan_entry_set_parity(reg_val);
2689
2690         nw64(ENET_VLAN_TBL(index), reg_val);
2691 }
2692
2693 static void vlan_tbl_clear(struct niu *np)
2694 {
2695         int i;
2696
2697         for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++)
2698                 nw64(ENET_VLAN_TBL(i), 0);
2699 }
2700
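/* Poll TCAM_CTL for the given status bit for up to ~1ms, returning
 * -ENODEV on timeout.
 */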
2701 static int tcam_wait_bit(struct niu *np, u64 bit)
2702 {
2703         int limit = 1000;
2704
2705         while (--limit > 0) {
2706                 if (nr64(TCAM_CTL) & bit)
2707                         break;
2708                 udelay(1);
2709         }
2710         if (limit <= 0)
2711                 return -ENODEV;
2712
2713         return 0;
2714 }
2715
2716 static int tcam_flush(struct niu *np, int index)
2717 {
2718         nw64(TCAM_KEY_0, 0x00);
2719         nw64(TCAM_KEY_MASK_0, 0xff);
2720         nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));
2721
2722         return tcam_wait_bit(np, TCAM_CTL_STAT);
2723 }
2724
2725 #if 0
2726 static int tcam_read(struct niu *np, int index,
2727                      u64 *key, u64 *mask)
2728 {
2729         int err;
2730
2731         nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_READ | index));
2732         err = tcam_wait_bit(np, TCAM_CTL_STAT);
2733         if (!err) {
2734                 key[0] = nr64(TCAM_KEY_0);
2735                 key[1] = nr64(TCAM_KEY_1);
2736                 key[2] = nr64(TCAM_KEY_2);
2737                 key[3] = nr64(TCAM_KEY_3);
2738                 mask[0] = nr64(TCAM_KEY_MASK_0);
2739                 mask[1] = nr64(TCAM_KEY_MASK_1);
2740                 mask[2] = nr64(TCAM_KEY_MASK_2);
2741                 mask[3] = nr64(TCAM_KEY_MASK_3);
2742         }
2743         return err;
2744 }
2745 #endif
2746
2747 static int tcam_write(struct niu *np, int index,
2748                       u64 *key, u64 *mask)
2749 {
2750         nw64(TCAM_KEY_0, key[0]);
2751         nw64(TCAM_KEY_1, key[1]);
2752         nw64(TCAM_KEY_2, key[2]);
2753         nw64(TCAM_KEY_3, key[3]);
2754         nw64(TCAM_KEY_MASK_0, mask[0]);
2755         nw64(TCAM_KEY_MASK_1, mask[1]);
2756         nw64(TCAM_KEY_MASK_2, mask[2]);
2757         nw64(TCAM_KEY_MASK_3, mask[3]);
2758         nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));
2759
2760         return tcam_wait_bit(np, TCAM_CTL_STAT);
2761 }
2762
2763 #if 0
2764 static int tcam_assoc_read(struct niu *np, int index, u64 *data)
2765 {
2766         int err;
2767
2768         nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_READ | index));
2769         err = tcam_wait_bit(np, TCAM_CTL_STAT);
2770         if (!err)
2771                 *data = nr64(TCAM_KEY_1);
2772
2773         return err;
2774 }
2775 #endif
2776
2777 static int tcam_assoc_write(struct niu *np, int index, u64 assoc_data)
2778 {
2779         nw64(TCAM_KEY_1, assoc_data);
2780         nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_WRITE | index));
2781
2782         return tcam_wait_bit(np, TCAM_CTL_STAT);
2783 }
2784
2785 static void tcam_enable(struct niu *np, int on)
2786 {
2787         u64 val = nr64(FFLP_CFG_1);
2788
2789         if (on)
2790                 val &= ~FFLP_CFG_1_TCAM_DIS;
2791         else
2792                 val |= FFLP_CFG_1_TCAM_DIS;
2793         nw64(FFLP_CFG_1, val);
2794 }
2795
2796 static void tcam_set_lat_and_ratio(struct niu *np, u64 latency, u64 ratio)
2797 {
2798         u64 val = nr64(FFLP_CFG_1);
2799
2800         val &= ~(FFLP_CFG_1_FFLPINITDONE |
2801                  FFLP_CFG_1_CAMLAT |
2802                  FFLP_CFG_1_CAMRATIO);
2803         val |= (latency << FFLP_CFG_1_CAMLAT_SHIFT);
2804         val |= (ratio << FFLP_CFG_1_CAMRATIO_SHIFT);
2805         nw64(FFLP_CFG_1, val);
2806
2807         val = nr64(FFLP_CFG_1);
2808         val |= FFLP_CFG_1_FFLPINITDONE;
2809         nw64(FFLP_CFG_1, val);
2810 }
2811
2812 static int tcam_user_eth_class_enable(struct niu *np, unsigned long class,
2813                                       int on)
2814 {
2815         unsigned long reg;
2816         u64 val;
2817
2818         if (class < CLASS_CODE_ETHERTYPE1 ||
2819             class > CLASS_CODE_ETHERTYPE2)
2820                 return -EINVAL;
2821
2822         reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1);
2823         val = nr64(reg);
2824         if (on)
2825                 val |= L2_CLS_VLD;
2826         else
2827                 val &= ~L2_CLS_VLD;
2828         nw64(reg, val);
2829
2830         return 0;
2831 }
2832
2833 #if 0
2834 static int tcam_user_eth_class_set(struct niu *np, unsigned long class,
2835                                    u64 ether_type)
2836 {
2837         unsigned long reg;
2838         u64 val;
2839
2840         if (class < CLASS_CODE_ETHERTYPE1 ||
2841             class > CLASS_CODE_ETHERTYPE2 ||
2842             (ether_type & ~(u64)0xffff) != 0)
2843                 return -EINVAL;
2844
2845         reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1);
2846         val = nr64(reg);
2847         val &= ~L2_CLS_ETYPE;
2848         val |= (ether_type << L2_CLS_ETYPE_SHIFT);
2849         nw64(reg, val);
2850
2851         return 0;
2852 }
2853 #endif
2854
2855 static int tcam_user_ip_class_enable(struct niu *np, unsigned long class,
2856                                      int on)
2857 {
2858         unsigned long reg;
2859         u64 val;
2860
2861         if (class < CLASS_CODE_USER_PROG1 ||
2862             class > CLASS_CODE_USER_PROG4)
2863                 return -EINVAL;
2864
2865         reg = L3_CLS(class - CLASS_CODE_USER_PROG1);
2866         val = nr64(reg);
2867         if (on)
2868                 val |= L3_CLS_VALID;
2869         else
2870                 val &= ~L3_CLS_VALID;
2871         nw64(reg, val);
2872
2873         return 0;
2874 }
2875
2876 #if 0
2877 static int tcam_user_ip_class_set(struct niu *np, unsigned long class,
2878                                   int ipv6, u64 protocol_id,
2879                                   u64 tos_mask, u64 tos_val)
2880 {
2881         unsigned long reg;
2882         u64 val;
2883
2884         if (class < CLASS_CODE_USER_PROG1 ||
2885             class > CLASS_CODE_USER_PROG4 ||
2886             (protocol_id & ~(u64)0xff) != 0 ||
2887             (tos_mask & ~(u64)0xff) != 0 ||
2888             (tos_val & ~(u64)0xff) != 0)
2889                 return -EINVAL;
2890
2891         reg = L3_CLS(class - CLASS_CODE_USER_PROG1);
2892         val = nr64(reg);
2893         val &= ~(L3_CLS_IPVER | L3_CLS_PID |
2894                  L3_CLS_TOSMASK | L3_CLS_TOS);
2895         if (ipv6)
2896                 val |= L3_CLS_IPVER;
2897         val |= (protocol_id << L3_CLS_PID_SHIFT);
2898         val |= (tos_mask << L3_CLS_TOSMASK_SHIFT);
2899         val |= (tos_val << L3_CLS_TOS_SHIFT);
2900         nw64(reg, val);
2901
2902         return 0;
2903 }
2904 #endif
2905
2906 static int tcam_early_init(struct niu *np)
2907 {
2908         unsigned long i;
2909         int err;
2910
2911         tcam_enable(np, 0);
2912         tcam_set_lat_and_ratio(np,
2913                                DEFAULT_TCAM_LATENCY,
2914                                DEFAULT_TCAM_ACCESS_RATIO);
2915         for (i = CLASS_CODE_ETHERTYPE1; i <= CLASS_CODE_ETHERTYPE2; i++) {
2916                 err = tcam_user_eth_class_enable(np, i, 0);
2917                 if (err)
2918                         return err;
2919         }
2920         for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_USER_PROG4; i++) {
2921                 err = tcam_user_ip_class_enable(np, i, 0);
2922                 if (err)
2923                         return err;
2924         }
2925
2926         return 0;
2927 }
2928
2929 static int tcam_flush_all(struct niu *np)
2930 {
2931         unsigned long i;
2932
2933         for (i = 0; i < np->parent->tcam_num_entries; i++) {
2934                 int err = tcam_flush(np, i);
2935                 if (err)
2936                         return err;
2937         }
2938         return 0;
2939 }
2940
2941 static u64 hash_addr_regval(unsigned long index, unsigned long num_entries)
2942 {
2943         return ((u64)index | (num_entries == 1 ?
2944                               HASH_TBL_ADDR_AUTOINC : 0));
2945 }
2946
2947 #if 0
2948 static int hash_read(struct niu *np, unsigned long partition,
2949                      unsigned long index, unsigned long num_entries,
2950                      u64 *data)
2951 {
2952         u64 val = hash_addr_regval(index, num_entries);
2953         unsigned long i;
2954
2955         if (partition >= FCRAM_NUM_PARTITIONS ||
2956             index + num_entries > FCRAM_SIZE)
2957                 return -EINVAL;
2958
2959         nw64(HASH_TBL_ADDR(partition), val);
2960         for (i = 0; i < num_entries; i++)
2961                 data[i] = nr64(HASH_TBL_DATA(partition));
2962
2963         return 0;
2964 }
2965 #endif
2966
2967 static int hash_write(struct niu *np, unsigned long partition,
2968                       unsigned long index, unsigned long num_entries,
2969                       u64 *data)
2970 {
2971         u64 val = hash_addr_regval(index, num_entries);
2972         unsigned long i;
2973
2974         if (partition >= FCRAM_NUM_PARTITIONS ||
2975             index + (num_entries * 8) > FCRAM_SIZE)
2976                 return -EINVAL;
2977
2978         nw64(HASH_TBL_ADDR(partition), val);
2979         for (i = 0; i < num_entries; i++)
2980                 nw64(HASH_TBL_DATA(partition), data[i]);
2981
2982         return 0;
2983 }
2984
2985 static void fflp_reset(struct niu *np)
2986 {
2987         u64 val;
2988
2989         nw64(FFLP_CFG_1, FFLP_CFG_1_PIO_FIO_RST);
2990         udelay(10);
2991         nw64(FFLP_CFG_1, 0);
2992
2993         val = FFLP_CFG_1_FCRAMOUTDR_NORMAL | FFLP_CFG_1_FFLPINITDONE;
2994         nw64(FFLP_CFG_1, val);
2995 }
2996
2997 static void fflp_set_timings(struct niu *np)
2998 {
2999         u64 val = nr64(FFLP_CFG_1);
3000
3001         val &= ~FFLP_CFG_1_FFLPINITDONE;
3002         val |= (DEFAULT_FCRAMRATIO << FFLP_CFG_1_FCRAMRATIO_SHIFT);
3003         nw64(FFLP_CFG_1, val);
3004
3005         val = nr64(FFLP_CFG_1);
3006         val |= FFLP_CFG_1_FFLPINITDONE;
3007         nw64(FFLP_CFG_1, val);
3008
3009         val = nr64(FCRAM_REF_TMR);
3010         val &= ~(FCRAM_REF_TMR_MAX | FCRAM_REF_TMR_MIN);
3011         val |= (DEFAULT_FCRAM_REFRESH_MAX << FCRAM_REF_TMR_MAX_SHIFT);
3012         val |= (DEFAULT_FCRAM_REFRESH_MIN << FCRAM_REF_TMR_MIN_SHIFT);
3013         nw64(FCRAM_REF_TMR, val);
3014 }
3015
3016 static int fflp_set_partition(struct niu *np, u64 partition,
3017                               u64 mask, u64 base, int enable)
3018 {
3019         unsigned long reg;
3020         u64 val;
3021
3022         if (partition >= FCRAM_NUM_PARTITIONS ||
3023             (mask & ~(u64)0x1f) != 0 ||
3024             (base & ~(u64)0x1f) != 0)
3025                 return -EINVAL;
3026
3027         reg = FLW_PRT_SEL(partition);
3028
3029         val = nr64(reg);
3030         val &= ~(FLW_PRT_SEL_EXT | FLW_PRT_SEL_MASK | FLW_PRT_SEL_BASE);
3031         val |= (mask << FLW_PRT_SEL_MASK_SHIFT);
3032         val |= (base << FLW_PRT_SEL_BASE_SHIFT);
3033         if (enable)
3034                 val |= FLW_PRT_SEL_EXT;
3035         nw64(reg, val);
3036
3037         return 0;
3038 }
3039
3040 static int fflp_disable_all_partitions(struct niu *np)
3041 {
3042         unsigned long i;
3043
3044         for (i = 0; i < FCRAM_NUM_PARTITIONS; i++) {
3045                 int err = fflp_set_partition(np, i, 0, 0, 0);
3046                 if (err)
3047                         return err;
3048         }
3049         return 0;
3050 }
3051
3052 static void fflp_llcsnap_enable(struct niu *np, int on)
3053 {
3054         u64 val = nr64(FFLP_CFG_1);
3055
3056         if (on)
3057                 val |= FFLP_CFG_1_LLCSNAP;
3058         else
3059                 val &= ~FFLP_CFG_1_LLCSNAP;
3060         nw64(FFLP_CFG_1, val);
3061 }
3062
3063 static void fflp_errors_enable(struct niu *np, int on)
3064 {
3065         u64 val = nr64(FFLP_CFG_1);
3066
3067         if (on)
3068                 val &= ~FFLP_CFG_1_ERRORDIS;
3069         else
3070                 val |= FFLP_CFG_1_ERRORDIS;
3071         nw64(FFLP_CFG_1, val);
3072 }
3073
3074 static int fflp_hash_clear(struct niu *np)
3075 {
3076         struct fcram_hash_ipv4 ent;
3077         unsigned long i;
3078
3079         /* IPV4 hash entry with valid bit clear, rest is don't care.  */
3080         memset(&ent, 0, sizeof(ent));
3081         ent.header = HASH_HEADER_EXT;
3082
3083         for (i = 0; i < FCRAM_SIZE; i += sizeof(ent)) {
3084                 int err = hash_write(np, 0, i, 1, (u64 *) &ent);
3085                 if (err)
3086                         return err;
3087         }
3088         return 0;
3089 }
3090
3091 static int fflp_early_init(struct niu *np)
3092 {
3093         struct niu_parent *parent;
3094         unsigned long flags;
3095         int err;
3096
3097         niu_lock_parent(np, flags);
3098
3099         parent = np->parent;
3100         err = 0;
3101         if (!(parent->flags & PARENT_FLGS_CLS_HWINIT)) {
3102                 niudbg(PROBE, "fflp_early_init: Initting hw on port %u\n",
3103                        np->port);
3104                 if (np->parent->plat_type != PLAT_TYPE_NIU) {
3105                         fflp_reset(np);
3106                         fflp_set_timings(np);
3107                         err = fflp_disable_all_partitions(np);
3108                         if (err) {
3109                                 niudbg(PROBE, "fflp_disable_all_partitions "
3110                                        "failed, err=%d\n", err);
3111                                 goto out;
3112                         }
3113                 }
3114
3115                 err = tcam_early_init(np);
3116                 if (err) {
3117                         niudbg(PROBE, "tcam_early_init failed, err=%d\n",
3118                                err);
3119                         goto out;
3120                 }
3121                 fflp_llcsnap_enable(np, 1);
3122                 fflp_errors_enable(np, 0);
3123                 nw64(H1POLY, 0);
3124                 nw64(H2POLY, 0);
3125
3126                 err = tcam_flush_all(np);
3127                 if (err) {
3128                         niudbg(PROBE, "tcam_flush_all failed, err=%d\n",
3129                                err);
3130                         goto out;
3131                 }
3132                 if (np->parent->plat_type != PLAT_TYPE_NIU) {
3133                         err = fflp_hash_clear(np);
3134                         if (err) {
3135                                 niudbg(PROBE, "fflp_hash_clear failed, "
3136                                        "err=%d\n", err);
3137                                 goto out;
3138                         }
3139                 }
3140
3141                 vlan_tbl_clear(np);
3142
3143                 niudbg(PROBE, "fflp_early_init: Success\n");
3144                 parent->flags |= PARENT_FLGS_CLS_HWINIT;
3145         }
3146 out:
3147         niu_unlock_parent(np, flags);
3148         return err;
3149 }
3150
3151 static int niu_set_flow_key(struct niu *np, unsigned long class_code, u64 key)
3152 {
3153         if (class_code < CLASS_CODE_USER_PROG1 ||
3154             class_code > CLASS_CODE_SCTP_IPV6)
3155                 return -EINVAL;
3156
3157         nw64(FLOW_KEY(class_code - CLASS_CODE_USER_PROG1), key);
3158         return 0;
3159 }
3160
3161 static int niu_set_tcam_key(struct niu *np, unsigned long class_code, u64 key)
3162 {
3163         if (class_code < CLASS_CODE_USER_PROG1 ||
3164             class_code > CLASS_CODE_SCTP_IPV6)
3165                 return -EINVAL;
3166
3167         nw64(TCAM_KEY(class_code - CLASS_CODE_USER_PROG1), key);
3168         return 0;
3169 }
3170
3171 static void niu_rx_skb_append(struct sk_buff *skb, struct page *page,
3172                               u32 offset, u32 size)
3173 {
3174         int i = skb_shinfo(skb)->nr_frags;
3175         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3176
3177         frag->page = page;
3178         frag->page_offset = offset;
3179         frag->size = size;
3180
3181         skb->len += size;
3182         skb->data_len += size;
3183         skb->truesize += size;
3184
3185         skb_shinfo(skb)->nr_frags = i + 1;
3186 }
3187
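/* Hash a page-aligned RX buffer DMA address into an rxhash[] bucket index
 * (fold the bits above PAGE_SHIFT, modulo MAX_RBR_RING_SIZE).
 */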
3188 static unsigned int niu_hash_rxaddr(struct rx_ring_info *rp, u64 a)
3189 {
3190         a >>= PAGE_SHIFT;
3191         a ^= (a >> ilog2(MAX_RBR_RING_SIZE));
3192
3193         return (a & (MAX_RBR_RING_SIZE - 1));
3194 }
3195
3196 static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr,
3197                                     struct page ***link)
3198 {
3199         unsigned int h = niu_hash_rxaddr(rp, addr);
3200         struct page *p, **pp;
3201
3202         addr &= PAGE_MASK;
3203         pp = &rp->rxhash[h];
3204         for (; (p = *pp) != NULL; pp = (struct page **) &p->mapping) {
3205                 if (p->index == addr) {
3206                         *link = pp;
3207                         break;
3208                 }
3209         }
3210
3211         return p;
3212 }
3213
3214 static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base)
3215 {
3216         unsigned int h = niu_hash_rxaddr(rp, base);
3217
3218         page->index = base;
3219         page->mapping = (struct address_space *) rp->rxhash[h];
3220         rp->rxhash[h] = page;
3221 }
3222
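/* Allocate and DMA-map one page for the RBR: hash it by DMA address, take
 * an extra page reference for each additional RBR block it backs, and fill
 * the consecutive RBR descriptors starting at start_index.
 */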
3223 static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp,
3224                             gfp_t mask, int start_index)
3225 {
3226         struct page *page;
3227         u64 addr;
3228         int i;
3229
3230         page = alloc_page(mask);
3231         if (!page)
3232                 return -ENOMEM;
3233
3234         addr = np->ops->map_page(np->device, page, 0,
3235                                  PAGE_SIZE, DMA_FROM_DEVICE);
3236
3237         niu_hash_page(rp, page, addr);
3238         if (rp->rbr_blocks_per_page > 1)
3239                 atomic_add(rp->rbr_blocks_per_page - 1,
3240                            &compound_head(page)->_count);
3241
3242         for (i = 0; i < rp->rbr_blocks_per_page; i++) {
3243                 __le32 *rbr = &rp->rbr[start_index + i];
3244
3245                 *rbr = cpu_to_le32(addr >> RBR_DESCR_ADDR_SHIFT);
3246                 addr += rp->rbr_block_size;
3247         }
3248
3249         return 0;
3250 }
3251
3252 static void niu_rbr_refill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
3253 {
3254         int index = rp->rbr_index;
3255
3256         rp->rbr_pending++;
3257         if ((rp->rbr_pending % rp->rbr_blocks_per_page) == 0) {
3258                 int err = niu_rbr_add_page(np, rp, mask, index);
3259
3260                 if (unlikely(err)) {
3261                         rp->rbr_pending--;
3262                         return;
3263                 }
3264
3265                 rp->rbr_index += rp->rbr_blocks_per_page;
3266                 BUG_ON(rp->rbr_index > rp->rbr_table_size);
3267                 if (rp->rbr_index == rp->rbr_table_size)
3268                         rp->rbr_index = 0;
3269
3270                 if (rp->rbr_pending >= rp->rbr_kick_thresh) {
3271                         nw64(RBR_KICK(rp->rx_channel), rp->rbr_pending);
3272                         rp->rbr_pending = 0;
3273                 }
3274         }
3275 }
3276
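/* Discard the packet at the current RCR index: walk its (possibly
 * multi-entry) RCR chain, free any buffer page that is fully consumed,
 * and return the number of RCR entries processed.
 */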
3277 static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp)
3278 {
3279         unsigned int index = rp->rcr_index;
3280         int num_rcr = 0;
3281
3282         rp->rx_dropped++;
3283         while (1) {
3284                 struct page *page, **link;
3285                 u64 addr, val;
3286                 u32 rcr_size;
3287
3288                 num_rcr++;
3289
3290                 val = le64_to_cpup(&rp->rcr[index]);
3291                 addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
3292                         RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
3293                 page = niu_find_rxpage(rp, addr, &link);
3294
3295                 rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
3296                                          RCR_ENTRY_PKTBUFSZ_SHIFT];
3297                 if ((page->index + PAGE_SIZE) - rcr_size == addr) {
3298                         *link = (struct page *) page->mapping;
3299                         np->ops->unmap_page(np->device, page->index,
3300                                             PAGE_SIZE, DMA_FROM_DEVICE);
3301                         page->index = 0;
3302                         page->mapping = NULL;
3303                         __free_page(page);
3304                         rp->rbr_refill_pending++;
3305                 }
3306
3307                 index = NEXT_RCR(rp, index);
3308                 if (!(val & RCR_ENTRY_MULTI))
3309                         break;
3310
3311         }
3312         rp->rcr_index = index;
3313
3314         return num_rcr;
3315 }
3316
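/* Build an skb from one or more RCR entries.  Every entry contributes a
 * page fragment; the first entry also determines the RX checksum
 * status, and the last entry is sized so the skb matches the L2 length
 * minus the FCS.
 */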
3317 static int niu_process_rx_pkt(struct niu *np, struct rx_ring_info *rp)
3318 {
3319         unsigned int index = rp->rcr_index;
3320         struct sk_buff *skb;
3321         int len, num_rcr;
3322
3323         skb = netdev_alloc_skb(np->dev, RX_SKB_ALLOC_SIZE);
3324         if (unlikely(!skb))
3325                 return niu_rx_pkt_ignore(np, rp);
3326
3327         num_rcr = 0;
3328         while (1) {
3329                 struct page *page, **link;
3330                 u32 rcr_size, append_size;
3331                 u64 addr, val, off;
3332
3333                 num_rcr++;
3334
3335                 val = le64_to_cpup(&rp->rcr[index]);
3336
3337                 len = (val & RCR_ENTRY_L2_LEN) >>
3338                         RCR_ENTRY_L2_LEN_SHIFT;
3339                 len -= ETH_FCS_LEN;
3340
3341                 addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
3342                         RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
3343                 page = niu_find_rxpage(rp, addr, &link);
3344
3345                 rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
3346                                          RCR_ENTRY_PKTBUFSZ_SHIFT];
3347
3348                 off = addr & ~PAGE_MASK;
3349                 append_size = rcr_size;
3350                 if (num_rcr == 1) {
3351                         int ptype;
3352
3353                         off += 2;
3354                         append_size -= 2;
3355
3356                         ptype = (val >> RCR_ENTRY_PKT_TYPE_SHIFT);
3357                         if ((ptype == RCR_PKT_TYPE_TCP ||
3358                              ptype == RCR_PKT_TYPE_UDP) &&
3359                             !(val & (RCR_ENTRY_NOPORT |
3360                                      RCR_ENTRY_ERROR)))
3361                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
3362                         else
3363                                 skb->ip_summed = CHECKSUM_NONE;
3364                 }
3365                 if (!(val & RCR_ENTRY_MULTI))
3366                         append_size = len - skb->len;
3367
3368                 niu_rx_skb_append(skb, page, off, append_size);
3369                 if ((page->index + rp->rbr_block_size) - rcr_size == addr) {
3370                         *link = (struct page *) page->mapping;
3371                         np->ops->unmap_page(np->device, page->index,
3372                                             PAGE_SIZE, DMA_FROM_DEVICE);
3373                         page->index = 0;
3374                         page->mapping = NULL;
3375                         rp->rbr_refill_pending++;
3376                 } else
3377                         get_page(page);
3378
3379                 index = NEXT_RCR(rp, index);
3380                 if (!(val & RCR_ENTRY_MULTI))
3381                         break;
3382
3383         }
3384         rp->rcr_index = index;
3385
3386         skb_reserve(skb, NET_IP_ALIGN);
3387         __pskb_pull_tail(skb, min(len, NIU_RXPULL_MAX));
3388
3389         rp->rx_packets++;
3390         rp->rx_bytes += skb->len;
3391
3392         skb->protocol = eth_type_trans(skb, np->dev);
3393         netif_receive_skb(skb);
3394
3395         return num_rcr;
3396 }
3397
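/* Fill the RBR with freshly allocated pages, one page worth of blocks
 * at a time, stopping before the end of the table would be overrun.
 */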
3398 static int niu_rbr_fill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
3399 {
3400         int blocks_per_page = rp->rbr_blocks_per_page;
3401         int err, index = rp->rbr_index;
3402
3403         err = 0;
3404         while (index < (rp->rbr_table_size - blocks_per_page)) {
3405                 err = niu_rbr_add_page(np, rp, mask, index);
3406                 if (err)
3407                         break;
3408
3409                 index += blocks_per_page;
3410         }
3411
3412         rp->rbr_index = index;
3413         return err;
3414 }
3415
3416 static void niu_rbr_free(struct niu *np, struct rx_ring_info *rp)
3417 {
3418         int i;
3419
3420         for (i = 0; i < MAX_RBR_RING_SIZE; i++) {
3421                 struct page *page;
3422
3423                 page = rp->rxhash[i];
3424                 while (page) {
3425                         struct page *next = (struct page *) page->mapping;
3426                         u64 base = page->index;
3427
3428                         np->ops->unmap_page(np->device, base, PAGE_SIZE,
3429                                             DMA_FROM_DEVICE);
3430                         page->index = 0;
3431                         page->mapping = NULL;
3432
3433                         __free_page(page);
3434
3435                         page = next;
3436                 }
3437         }
3438
3439         for (i = 0; i < rp->rbr_table_size; i++)
3440                 rp->rbr[i] = cpu_to_le32(0);
3441         rp->rbr_index = 0;
3442 }
3443
3444 static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx)
3445 {
3446         struct tx_buff_info *tb = &rp->tx_buffs[idx];
3447         struct sk_buff *skb = tb->skb;
3448         struct tx_pkt_hdr *tp;
3449         u64 tx_flags;
3450         int i, len;
3451
3452         tp = (struct tx_pkt_hdr *) skb->data;
3453         tx_flags = le64_to_cpup(&tp->flags);
3454
3455         rp->tx_packets++;
3456         rp->tx_bytes += (((tx_flags & TXHDR_LEN) >> TXHDR_LEN_SHIFT) -
3457                          ((tx_flags & TXHDR_PAD) / 2));
3458
3459         len = skb_headlen(skb);
3460         np->ops->unmap_single(np->device, tb->mapping,
3461                               len, DMA_TO_DEVICE);
3462
3463         if (le64_to_cpu(rp->descr[idx]) & TX_DESC_MARK)
3464                 rp->mark_pending--;
3465
3466         tb->skb = NULL;
3467         do {
3468                 idx = NEXT_TX(rp, idx);
3469                 len -= MAX_TX_DESC_LEN;
3470         } while (len > 0);
3471
3472         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3473                 tb = &rp->tx_buffs[idx];
3474                 BUG_ON(tb->skb != NULL);
3475                 np->ops->unmap_page(np->device, tb->mapping,
3476                                     skb_shinfo(skb)->frags[i].size,
3477                                     DMA_TO_DEVICE);
3478                 idx = NEXT_TX(rp, idx);
3479         }
3480
3481         dev_kfree_skb(skb);
3482
3483         return idx;
3484 }
3485
3486 #define NIU_TX_WAKEUP_THRESH(rp)                ((rp)->pending / 4)
3487
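/* Reclaim completed TX packets for this ring using the packet count
 * snapshot taken from TX_CS, then wake the queue if it was stopped and
 * more than a quarter of the ring is free again.
 */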
3488 static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
3489 {
3490         struct netdev_queue *txq;
3491         u16 pkt_cnt, tmp;
3492         int cons, index;
3493         u64 cs;
3494
3495         index = (rp - np->tx_rings);
3496         txq = netdev_get_tx_queue(np->dev, index);
3497
3498         cs = rp->tx_cs;
3499         if (unlikely(!(cs & (TX_CS_MK | TX_CS_MMK))))
3500                 goto out;
3501
3502         tmp = pkt_cnt = (cs & TX_CS_PKT_CNT) >> TX_CS_PKT_CNT_SHIFT;
3503         pkt_cnt = (pkt_cnt - rp->last_pkt_cnt) &
3504                 (TX_CS_PKT_CNT >> TX_CS_PKT_CNT_SHIFT);
3505
3506         rp->last_pkt_cnt = tmp;
3507
3508         cons = rp->cons;
3509
3510         niudbg(TX_DONE, "%s: niu_tx_work() pkt_cnt[%u] cons[%d]\n",
3511                np->dev->name, pkt_cnt, cons);
3512
3513         while (pkt_cnt--)
3514                 cons = release_tx_packet(np, rp, cons);
3515
3516         rp->cons = cons;
3517         smp_mb();
3518
3519 out:
3520         if (unlikely(netif_tx_queue_stopped(txq) &&
3521                      (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
3522                 __netif_tx_lock(txq, smp_processor_id());
3523                 if (netif_tx_queue_stopped(txq) &&
3524                     (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))
3525                         netif_tx_wake_queue(txq);
3526                 __netif_tx_unlock(txq);
3527         }
3528 }
3529
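/* NAPI RX processing for one channel: consume up to 'budget' packets
 * from the RCR, replenish the RBR when enough refills are pending, and
 * acknowledge the work by writing the packet and pointer counts back to
 * RX_DMA_CTL_STAT.
 */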
3530 static int niu_rx_work(struct niu *np, struct rx_ring_info *rp, int budget)
3531 {
3532         int qlen, rcr_done = 0, work_done = 0;
3533         struct rxdma_mailbox *mbox = rp->mbox;
3534         u64 stat;
3535
3536 #if 1
3537         stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
3538         qlen = nr64(RCRSTAT_A(rp->rx_channel)) & RCRSTAT_A_QLEN;
3539 #else
3540         stat = le64_to_cpup(&mbox->rx_dma_ctl_stat);
3541         qlen = (le64_to_cpup(&mbox->rcrstat_a) & RCRSTAT_A_QLEN);
3542 #endif
3543         mbox->rx_dma_ctl_stat = 0;
3544         mbox->rcrstat_a = 0;
3545
3546         niudbg(RX_STATUS, "%s: niu_rx_work(chan[%d]), stat[%llx] qlen=%d\n",
3547                np->dev->name, rp->rx_channel, (unsigned long long) stat, qlen);
3548
3549         rcr_done = work_done = 0;
3550         qlen = min(qlen, budget);
3551         while (work_done < qlen) {
3552                 rcr_done += niu_process_rx_pkt(np, rp);
3553                 work_done++;
3554         }
3555
3556         if (rp->rbr_refill_pending >= rp->rbr_kick_thresh) {
3557                 unsigned int i;
3558
3559                 for (i = 0; i < rp->rbr_refill_pending; i++)
3560                         niu_rbr_refill(np, rp, GFP_ATOMIC);
3561                 rp->rbr_refill_pending = 0;
3562         }
3563
3564         stat = (RX_DMA_CTL_STAT_MEX |
3565                 ((u64)work_done << RX_DMA_CTL_STAT_PKTREAD_SHIFT) |
3566                 ((u64)rcr_done << RX_DMA_CTL_STAT_PTRREAD_SHIFT));
3567
3568         nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat);
3569
3570         return work_done;
3571 }
3572
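/* Service every TX and RX channel flagged in the logical device state
 * vector v0 (TX channels in the upper 32 bits, RX channels in the lower
 * 32), clearing each channel's interrupt mask as it is handled.
 */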
3573 static int niu_poll_core(struct niu *np, struct niu_ldg *lp, int budget)
3574 {
3575         u64 v0 = lp->v0;
3576         u32 tx_vec = (v0 >> 32);
3577         u32 rx_vec = (v0 & 0xffffffff);
3578         int i, work_done = 0;
3579
3580         niudbg(INTR, "%s: niu_poll_core() v0[%016llx]\n",
3581                np->dev->name, (unsigned long long) v0);
3582
3583         for (i = 0; i < np->num_tx_rings; i++) {
3584                 struct tx_ring_info *rp = &np->tx_rings[i];
3585                 if (tx_vec & (1 << rp->tx_channel))
3586                         niu_tx_work(np, rp);
3587                 nw64(LD_IM0(LDN_TXDMA(rp->tx_channel)), 0);
3588         }
3589
3590         for (i = 0; i < np->num_rx_rings; i++) {
3591                 struct rx_ring_info *rp = &np->rx_rings[i];
3592
3593                 if (rx_vec & (1 << rp->rx_channel)) {
3594                         int this_work_done;
3595
3596                         this_work_done = niu_rx_work(np, rp,
3597                                                      budget);
3598
3599                         budget -= this_work_done;
3600                         work_done += this_work_done;
3601                 }
3602                 nw64(LD_IM0(LDN_RXDMA(rp->rx_channel)), 0);
3603         }
3604
3605         return work_done;
3606 }
3607
3608 static int niu_poll(struct napi_struct *napi, int budget)
3609 {
3610         struct niu_ldg *lp = container_of(napi, struct niu_ldg, napi);
3611         struct niu *np = lp->np;
3612         int work_done;
3613
3614         work_done = niu_poll_core(np, lp, budget);
3615
3616         if (work_done < budget) {
3617                 netif_rx_complete(np->dev, napi);
3618                 niu_ldg_rearm(np, lp, 1);
3619         }
3620         return work_done;
3621 }
3622
3623 static void niu_log_rxchan_errors(struct niu *np, struct rx_ring_info *rp,
3624                                   u64 stat)
3625 {
3626         dev_err(np->device, PFX "%s: RX channel %u errors ( ",
3627                 np->dev->name, rp->rx_channel);
3628
3629         if (stat & RX_DMA_CTL_STAT_RBR_TMOUT)
3630                 printk("RBR_TMOUT ");
3631         if (stat & RX_DMA_CTL_STAT_RSP_CNT_ERR)
3632                 printk("RSP_CNT ");
3633         if (stat & RX_DMA_CTL_STAT_BYTE_EN_BUS)
3634                 printk("BYTE_EN_BUS ");
3635         if (stat & RX_DMA_CTL_STAT_RSP_DAT_ERR)
3636                 printk("RSP_DAT ");
3637         if (stat & RX_DMA_CTL_STAT_RCR_ACK_ERR)
3638                 printk("RCR_ACK ");
3639         if (stat & RX_DMA_CTL_STAT_RCR_SHA_PAR)
3640                 printk("RCR_SHA_PAR ");
3641         if (stat & RX_DMA_CTL_STAT_RBR_PRE_PAR)
3642                 printk("RBR_PRE_PAR ");
3643         if (stat & RX_DMA_CTL_STAT_CONFIG_ERR)
3644                 printk("CONFIG ");
3645         if (stat & RX_DMA_CTL_STAT_RCRINCON)
3646                 printk("RCRINCON ");
3647         if (stat & RX_DMA_CTL_STAT_RCRFULL)
3648                 printk("RCRFULL ");
3649         if (stat & RX_DMA_CTL_STAT_RBRFULL)
3650                 printk("RBRFULL ");
3651         if (stat & RX_DMA_CTL_STAT_RBRLOGPAGE)
3652                 printk("RBRLOGPAGE ");
3653         if (stat & RX_DMA_CTL_STAT_CFIGLOGPAGE)
3654                 printk("CFIGLOGPAGE ");
3655         if (stat & RX_DMA_CTL_STAT_DC_FIFO_ERR)
3656                 printk("DC_FIFO ");
3657
3658         printk(")\n");
3659 }
3660
3661 static int niu_rx_error(struct niu *np, struct rx_ring_info *rp)
3662 {
3663         u64 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
3664         int err = 0;
3665
3667         if (stat & (RX_DMA_CTL_STAT_CHAN_FATAL |
3668                     RX_DMA_CTL_STAT_PORT_FATAL))
3669                 err = -EINVAL;
3670
3671         if (err) {
3672                 dev_err(np->device, PFX "%s: RX channel %u error, stat[%llx]\n",
3673                         np->dev->name, rp->rx_channel,
3674                         (unsigned long long) stat);
3675
3676                 niu_log_rxchan_errors(np, rp, stat);
3677         }
3678
3679         nw64(RX_DMA_CTL_STAT(rp->rx_channel),
3680              stat & RX_DMA_CTL_WRITE_CLEAR_ERRS);
3681
3682         return err;
3683 }
3684
3685 static void niu_log_txchan_errors(struct niu *np, struct tx_ring_info *rp,
3686                                   u64 cs)
3687 {
3688         dev_err(np->device, PFX "%s: TX channel %u errors ( ",
3689                 np->dev->name, rp->tx_channel);
3690
3691         if (cs & TX_CS_MBOX_ERR)
3692                 printk("MBOX ");
3693         if (cs & TX_CS_PKT_SIZE_ERR)
3694                 printk("PKT_SIZE ");
3695         if (cs & TX_CS_TX_RING_OFLOW)
3696                 printk("TX_RING_OFLOW ");
3697         if (cs & TX_CS_PREF_BUF_PAR_ERR)
3698                 printk("PREF_BUF_PAR ");
3699         if (cs & TX_CS_NACK_PREF)
3700                 printk("NACK_PREF ");
3701         if (cs & TX_CS_NACK_PKT_RD)
3702                 printk("NACK_PKT_RD ");
3703         if (cs & TX_CS_CONF_PART_ERR)
3704                 printk("CONF_PART ");
3705         if (cs & TX_CS_PKT_PRT_ERR)
3706                 printk("PKT_PTR ");
3707
3708         printk(")\n");
3709 }
3710
3711 static int niu_tx_error(struct niu *np, struct tx_ring_info *rp)
3712 {
3713         u64 cs, logh, logl;
3714
3715         cs = nr64(TX_CS(rp->tx_channel));
3716         logh = nr64(TX_RNG_ERR_LOGH(rp->tx_channel));
3717         logl = nr64(TX_RNG_ERR_LOGL(rp->tx_channel));
3718
3719         dev_err(np->device, PFX "%s: TX channel %u error, "
3720                 "cs[%llx] logh[%llx] logl[%llx]\n",
3721                 np->dev->name, rp->tx_channel,
3722                 (unsigned long long) cs,
3723                 (unsigned long long) logh,
3724                 (unsigned long long) logl);
3725
3726         niu_log_txchan_errors(np, rp, cs);
3727
3728         return -ENODEV;
3729 }
3730
3731 static int niu_mif_interrupt(struct niu *np)
3732 {
3733         u64 mif_status = nr64(MIF_STATUS);
3734         int phy_mdint = 0;
3735
3736         if (np->flags & NIU_FLAGS_XMAC) {
3737                 u64 xrxmac_stat = nr64_mac(XRXMAC_STATUS);
3738
3739                 if (xrxmac_stat & XRXMAC_STATUS_PHY_MDINT)
3740                         phy_mdint = 1;
3741         }
3742
3743         dev_err(np->device, PFX "%s: MIF interrupt, "
3744                 "stat[%llx] phy_mdint(%d)\n",
3745                 np->dev->name, (unsigned long long) mif_status, phy_mdint);
3746
3747         return -ENODEV;
3748 }
3749
3750 static void niu_xmac_interrupt(struct niu *np)
3751 {
3752         struct niu_xmac_stats *mp = &np->mac_stats.xmac;
3753         u64 val;
3754
3755         val = nr64_mac(XTXMAC_STATUS);
3756         if (val & XTXMAC_STATUS_FRAME_CNT_EXP)
3757                 mp->tx_frames += TXMAC_FRM_CNT_COUNT;
3758         if (val & XTXMAC_STATUS_BYTE_CNT_EXP)
3759                 mp->tx_bytes += TXMAC_BYTE_CNT_COUNT;
3760         if (val & XTXMAC_STATUS_TXFIFO_XFR_ERR)
3761                 mp->tx_fifo_errors++;
3762         if (val & XTXMAC_STATUS_TXMAC_OFLOW)
3763                 mp->tx_overflow_errors++;
3764         if (val & XTXMAC_STATUS_MAX_PSIZE_ERR)
3765                 mp->tx_max_pkt_size_errors++;
3766         if (val & XTXMAC_STATUS_TXMAC_UFLOW)
3767                 mp->tx_underflow_errors++;
3768
3769         val = nr64_mac(XRXMAC_STATUS);
3770         if (val & XRXMAC_STATUS_LCL_FLT_STATUS)
3771                 mp->rx_local_faults++;
3772         if (val & XRXMAC_STATUS_RFLT_DET)
3773                 mp->rx_remote_faults++;
3774         if (val & XRXMAC_STATUS_LFLT_CNT_EXP)
3775                 mp->rx_link_faults += LINK_FAULT_CNT_COUNT;
3776         if (val & XRXMAC_STATUS_ALIGNERR_CNT_EXP)
3777                 mp->rx_align_errors += RXMAC_ALIGN_ERR_CNT_COUNT;
3778         if (val & XRXMAC_STATUS_RXFRAG_CNT_EXP)
3779                 mp->rx_frags += RXMAC_FRAG_CNT_COUNT;
3780         if (val & XRXMAC_STATUS_RXMULTF_CNT_EXP)
3781                 mp->rx_mcasts += RXMAC_MC_FRM_CNT_COUNT;
3782         if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP)
3783                 mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT;
3786         if (val & XRXMAC_STATUS_RXHIST1_CNT_EXP)
3787                 mp->rx_hist_cnt1 += RXMAC_HIST_CNT1_COUNT;
3788         if (val & XRXMAC_STATUS_RXHIST2_CNT_EXP)
3789                 mp->rx_hist_cnt2 += RXMAC_HIST_CNT2_COUNT;
3790         if (val & XRXMAC_STATUS_RXHIST3_CNT_EXP)
3791                 mp->rx_hist_cnt3 += RXMAC_HIST_CNT3_COUNT;
3792         if (val & XRXMAC_STATUS_RXHIST4_CNT_EXP)
3793                 mp->rx_hist_cnt4 += RXMAC_HIST_CNT4_COUNT;
3794         if (val & XRXMAC_STATUS_RXHIST5_CNT_EXP)
3795                 mp->rx_hist_cnt5 += RXMAC_HIST_CNT5_COUNT;
3796         if (val & XRXMAC_STATUS_RXHIST6_CNT_EXP)
3797                 mp->rx_hist_cnt6 += RXMAC_HIST_CNT6_COUNT;
3798         if (val & XRXMAC_STATUS_RXHIST7_CNT_EXP)
3799                 mp->rx_hist_cnt7 += RXMAC_HIST_CNT7_COUNT;
3800         if (val & XRXMAC_STAT_MSK_RXOCTET_CNT_EXP)
3801                 mp->rx_octets += RXMAC_BT_CNT_COUNT;
3802         if (val & XRXMAC_STATUS_CVIOLERR_CNT_EXP)
3803                 mp->rx_code_violations += RXMAC_CD_VIO_CNT_COUNT;
3804         if (val & XRXMAC_STATUS_LENERR_CNT_EXP)
3805                 mp->rx_len_errors += RXMAC_MPSZER_CNT_COUNT;
3806         if (val & XRXMAC_STATUS_CRCERR_CNT_EXP)
3807                 mp->rx_crc_errors += RXMAC_CRC_ER_CNT_COUNT;
3808         if (val & XRXMAC_STATUS_RXUFLOW)
3809                 mp->rx_underflows++;
3810         if (val & XRXMAC_STATUS_RXOFLOW)
3811                 mp->rx_overflows++;
3812
3813         val = nr64_mac(XMAC_FC_STAT);
3814         if (val & XMAC_FC_STAT_TX_MAC_NPAUSE)
3815                 mp->pause_off_state++;
3816         if (val & XMAC_FC_STAT_TX_MAC_PAUSE)
3817                 mp->pause_on_state++;
3818         if (val & XMAC_FC_STAT_RX_MAC_RPAUSE)
3819                 mp->pause_received++;
3820 }
3821
3822 static void niu_bmac_interrupt(struct niu *np)
3823 {
3824         struct niu_bmac_stats *mp = &np->mac_stats.bmac;
3825         u64 val;
3826
3827         val = nr64_mac(BTXMAC_STATUS);
3828         if (val & BTXMAC_STATUS_UNDERRUN)
3829                 mp->tx_underflow_errors++;
3830         if (val & BTXMAC_STATUS_MAX_PKT_ERR)
3831                 mp->tx_max_pkt_size_errors++;
3832         if (val & BTXMAC_STATUS_BYTE_CNT_EXP)
3833                 mp->tx_bytes += BTXMAC_BYTE_CNT_COUNT;
3834         if (val & BTXMAC_STATUS_FRAME_CNT_EXP)
3835                 mp->tx_frames += BTXMAC_FRM_CNT_COUNT;
3836
3837         val = nr64_mac(BRXMAC_STATUS);
3838         if (val & BRXMAC_STATUS_OVERFLOW)
3839                 mp->rx_overflows++;
3840         if (val & BRXMAC_STATUS_FRAME_CNT_EXP)
3841                 mp->rx_frames += BRXMAC_FRAME_CNT_COUNT;
3842         if (val & BRXMAC_STATUS_ALIGN_ERR_EXP)
3843                 mp->rx_align_errors += BRXMAC_ALIGN_ERR_CNT_COUNT;
3844         if (val & BRXMAC_STATUS_CRC_ERR_EXP)
3845                 mp->rx_crc_errors += BRXMAC_ALIGN_ERR_CNT_COUNT;
3846         if (val & BRXMAC_STATUS_LEN_ERR_EXP)
3847                 mp->rx_len_errors += BRXMAC_CODE_VIOL_ERR_CNT_COUNT;
3848
3849         val = nr64_mac(BMAC_CTRL_STATUS);
3850         if (val & BMAC_CTRL_STATUS_NOPAUSE)
3851                 mp->pause_off_state++;
3852         if (val & BMAC_CTRL_STATUS_PAUSE)
3853                 mp->pause_on_state++;
3854         if (val & BMAC_CTRL_STATUS_PAUSE_RECV)
3855                 mp->pause_received++;
3856 }
3857
3858 static int niu_mac_interrupt(struct niu *np)
3859 {
3860         if (np->flags & NIU_FLAGS_XMAC)
3861                 niu_xmac_interrupt(np);
3862         else
3863                 niu_bmac_interrupt(np);
3864
3865         return 0;
3866 }
3867
3868 static void niu_log_device_error(struct niu *np, u64 stat)
3869 {
3870         dev_err(np->device, PFX "%s: Core device errors ( ",
3871                 np->dev->name);
3872
3873         if (stat & SYS_ERR_MASK_META2)
3874                 printk("META2 ");
3875         if (stat & SYS_ERR_MASK_META1)
3876                 printk("META1 ");
3877         if (stat & SYS_ERR_MASK_PEU)
3878                 printk("PEU ");
3879         if (stat & SYS_ERR_MASK_TXC)
3880                 printk("TXC ");
3881         if (stat & SYS_ERR_MASK_RDMC)
3882                 printk("RDMC ");
3883         if (stat & SYS_ERR_MASK_TDMC)
3884                 printk("TDMC ");
3885         if (stat & SYS_ERR_MASK_ZCP)
3886                 printk("ZCP ");
3887         if (stat & SYS_ERR_MASK_FFLP)
3888                 printk("FFLP ");
3889         if (stat & SYS_ERR_MASK_IPP)
3890                 printk("IPP ");
3891         if (stat & SYS_ERR_MASK_MAC)
3892                 printk("MAC ");
3893         if (stat & SYS_ERR_MASK_SMX)
3894                 printk("SMX ");
3895
3896         printk(")\n");
3897 }
3898
3899 static int niu_device_error(struct niu *np)
3900 {
3901         u64 stat = nr64(SYS_ERR_STAT);
3902
3903         dev_err(np->device, PFX "%s: Core device error, stat[%llx]\n",
3904                 np->dev->name, (unsigned long long) stat);
3905
3906         niu_log_device_error(np, stat);
3907
3908         return -ENODEV;
3909 }
3910
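/* Handle slow-path interrupt sources: per-channel RX and TX errors
 * reported in v1, MIF events, and MAC or core device errors reported in
 * v2.  A fatal error disables interrupts and is propagated to the
 * caller.
 */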
3911 static int niu_slowpath_interrupt(struct niu *np, struct niu_ldg *lp,
3912                               u64 v0, u64 v1, u64 v2)
3913 {
3914
3915         int i, err = 0;
3916
3917         lp->v0 = v0;
3918         lp->v1 = v1;
3919         lp->v2 = v2;
3920
3921         if (v1 & 0x00000000ffffffffULL) {
3922                 u32 rx_vec = (v1 & 0xffffffff);
3923
3924                 for (i = 0; i < np->num_rx_rings; i++) {
3925                         struct rx_ring_info *rp = &np->rx_rings[i];
3926
3927                         if (rx_vec & (1 << rp->rx_channel)) {
3928                                 int r = niu_rx_error(np, rp);
3929                                 if (r) {
3930                                         err = r;
3931                                 } else {
3932                                         if (!v0)
3933                                                 nw64(RX_DMA_CTL_STAT(rp->rx_channel),
3934                                                      RX_DMA_CTL_STAT_MEX);
3935                                 }
3936                         }
3937                 }
3938         }
3939         if (v1 & 0x7fffffff00000000ULL) {
3940                 u32 tx_vec = (v1 >> 32) & 0x7fffffff;
3941
3942                 for (i = 0; i < np->num_tx_rings; i++) {
3943                         struct tx_ring_info *rp = &np->tx_rings[i];
3944
3945                         if (tx_vec & (1 << rp->tx_channel)) {
3946                                 int r = niu_tx_error(np, rp);
3947                                 if (r)
3948                                         err = r;
3949                         }
3950                 }
3951         }
3952         if ((v0 | v1) & 0x8000000000000000ULL) {
3953                 int r = niu_mif_interrupt(np);
3954                 if (r)
3955                         err = r;
3956         }
3957         if (v2) {
3958                 if (v2 & 0x01ef) {
3959                         int r = niu_mac_interrupt(np);
3960                         if (r)
3961                                 err = r;
3962                 }
3963                 if (v2 & 0x0210) {
3964                         int r = niu_device_error(np);
3965                         if (r)
3966                                 err = r;
3967                 }
3968         }
3969
3970         if (err)
3971                 niu_enable_interrupts(np, 0);
3972
3973         return err;
3974 }
3975
3976 static void niu_rxchan_intr(struct niu *np, struct rx_ring_info *rp,
3977                             int ldn)
3978 {
3979         struct rxdma_mailbox *mbox = rp->mbox;
3980         u64 stat_write, stat = le64_to_cpup(&mbox->rx_dma_ctl_stat);
3981
3982         stat_write = (RX_DMA_CTL_STAT_RCRTHRES |
3983                       RX_DMA_CTL_STAT_RCRTO);
3984         nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat_write);
3985
3986         niudbg(INTR, "%s: rxchan_intr stat[%llx]\n",
3987                np->dev->name, (unsigned long long) stat);
3988 }
3989
3990 static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp,
3991                             int ldn)
3992 {
3993         rp->tx_cs = nr64(TX_CS(rp->tx_channel));
3994
3995         niudbg(INTR, "%s: txchan_intr cs[%llx]\n",
3996                np->dev->name, (unsigned long long) rp->tx_cs);
3997 }
3998
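/* Fast path: for each RX/TX channel owned by this logical device group,
 * mask the channel's interrupt and latch its status so the NAPI poll
 * handler can process it later.
 */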
3999 static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0)
4000 {
4001         struct niu_parent *parent = np->parent;
4002         u32 rx_vec, tx_vec;
4003         int i;
4004
4005         tx_vec = (v0 >> 32);
4006         rx_vec = (v0 & 0xffffffff);
4007
4008         for (i = 0; i < np->num_rx_rings; i++) {
4009                 struct rx_ring_info *rp = &np->rx_rings[i];
4010                 int ldn = LDN_RXDMA(rp->rx_channel);
4011
4012                 if (parent->ldg_map[ldn] != ldg)
4013                         continue;
4014
4015                 nw64(LD_IM0(ldn), LD_IM0_MASK);
4016                 if (rx_vec & (1 << rp->rx_channel))
4017                         niu_rxchan_intr(np, rp, ldn);
4018         }
4019
4020         for (i = 0; i < np->num_tx_rings; i++) {
4021                 struct tx_ring_info *rp = &np->tx_rings[i];
4022                 int ldn = LDN_TXDMA(rp->tx_channel);
4023
4024                 if (parent->ldg_map[ldn] != ldg)
4025                         continue;
4026
4027                 nw64(LD_IM0(ldn), LD_IM0_MASK);
4028                 if (tx_vec & (1 << rp->tx_channel))
4029                         niu_txchan_intr(np, rp, ldn);
4030         }
4031 }
4032
4033 static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp,
4034                               u64 v0, u64 v1, u64 v2)
4035 {
4036         if (likely(netif_rx_schedule_prep(np->dev, &lp->napi))) {
4037                 lp->v0 = v0;
4038                 lp->v1 = v1;
4039                 lp->v2 = v2;
4040                 __niu_fastpath_interrupt(np, lp->ldg_num, v0);
4041                 __netif_rx_schedule(np->dev, &lp->napi);
4042         }
4043 }
4044
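/* Top-level interrupt handler: read the three logical device state
 * vectors, handle slow-path sources inline, and schedule NAPI for the
 * RX/TX fast path.
 */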
4045 static irqreturn_t niu_interrupt(int irq, void *dev_id)
4046 {
4047         struct niu_ldg *lp = dev_id;
4048         struct niu *np = lp->np;
4049         int ldg = lp->ldg_num;
4050         unsigned long flags;
4051         u64 v0, v1, v2;
4052
4053         if (netif_msg_intr(np))
4054                 printk(KERN_DEBUG PFX "niu_interrupt() ldg[%p](%d) ",
4055                        lp, ldg);
4056
4057         spin_lock_irqsave(&np->lock, flags);
4058
4059         v0 = nr64(LDSV0(ldg));
4060         v1 = nr64(LDSV1(ldg));
4061         v2 = nr64(LDSV2(ldg));
4062
4063         if (netif_msg_intr(np))
4064                 printk("v0[%llx] v1[%llx] v2[%llx]\n",
4065                        (unsigned long long) v0,
4066                        (unsigned long long) v1,
4067                        (unsigned long long) v2);
4068
4069         if (unlikely(!v0 && !v1 && !v2)) {
4070                 spin_unlock_irqrestore(&np->lock, flags);
4071                 return IRQ_NONE;
4072         }
4073
4074         if (unlikely((v0 & ((u64)1 << LDN_MIF)) || v1 || v2)) {
4075                 int err = niu_slowpath_interrupt(np, lp, v0, v1, v2);
4076                 if (err)
4077                         goto out;
4078         }
4079         if (likely(v0 & ~((u64)1 << LDN_MIF)))
4080                 niu_schedule_napi(np, lp, v0, v1, v2);
4081         else
4082                 niu_ldg_rearm(np, lp, 1);
4083 out:
4084         spin_unlock_irqrestore(&np->lock, flags);
4085
4086         return IRQ_HANDLED;
4087 }
4088
4089 static void niu_free_rx_ring_info(struct niu *np, struct rx_ring_info *rp)
4090 {
4091         if (rp->mbox) {
4092                 np->ops->free_coherent(np->device,
4093                                        sizeof(struct rxdma_mailbox),
4094                                        rp->mbox, rp->mbox_dma);
4095                 rp->mbox = NULL;
4096         }
4097         if (rp->rcr) {
4098                 np->ops->free_coherent(np->device,
4099                                        MAX_RCR_RING_SIZE * sizeof(__le64),
4100                                        rp->rcr, rp->rcr_dma);
4101                 rp->rcr = NULL;
4102                 rp->rcr_table_size = 0;
4103                 rp->rcr_index = 0;
4104         }
4105         if (rp->rbr) {
4106                 niu_rbr_free(np, rp);
4107
4108                 np->ops->free_coherent(np->device,
4109                                        MAX_RBR_RING_SIZE * sizeof(__le32),
4110                                        rp->rbr, rp->rbr_dma);
4111                 rp->rbr = NULL;
4112                 rp->rbr_table_size = 0;
4113                 rp->rbr_index = 0;
4114         }
4115         kfree(rp->rxhash);
4116         rp->rxhash = NULL;
4117 }
4118
4119 static void niu_free_tx_ring_info(struct niu *np, struct tx_ring_info *rp)
4120 {
4121         if (rp->mbox) {
4122                 np->ops->free_coherent(np->device,
4123                                        sizeof(struct txdma_mailbox),
4124                                        rp->mbox, rp->mbox_dma);
4125                 rp->mbox = NULL;
4126         }
4127         if (rp->descr) {
4128                 int i;
4129
4130                 for (i = 0; i < MAX_TX_RING_SIZE; i++) {
4131                         if (rp->tx_buffs[i].skb)
4132                                 (void) release_tx_packet(np, rp, i);
4133                 }
4134
4135                 np->ops->free_coherent(np->device,
4136                                        MAX_TX_RING_SIZE * sizeof(__le64),
4137                                        rp->descr, rp->descr_dma);
4138                 rp->descr = NULL;
4139                 rp->pending = 0;
4140                 rp->prod = 0;
4141                 rp->cons = 0;
4142                 rp->wrap_bit = 0;
4143         }
4144 }
4145
4146 static void niu_free_channels(struct niu *np)
4147 {
4148         int i;
4149
4150         if (np->rx_rings) {
4151                 for (i = 0; i < np->num_rx_rings; i++) {
4152                         struct rx_ring_info *rp = &np->rx_rings[i];
4153
4154                         niu_free_rx_ring_info(np, rp);
4155                 }
4156                 kfree(np->rx_rings);
4157                 np->rx_rings = NULL;
4158                 np->num_rx_rings = 0;
4159         }
4160
4161         if (np->tx_rings) {
4162                 for (i = 0; i < np->num_tx_rings; i++) {
4163                         struct tx_ring_info *rp = &np->tx_rings[i];
4164
4165                         niu_free_tx_ring_info(np, rp);
4166                 }
4167                 kfree(np->tx_rings);
4168                 np->tx_rings = NULL;
4169                 np->num_tx_rings = 0;
4170         }
4171 }
4172
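/* Allocate the per-channel RX state: the page hash, the RXDMA mailbox,
 * the RCR completion ring and the RBR buffer ring.  Each coherent
 * allocation is checked for the 64-byte alignment the chip expects.
 */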
4173 static int niu_alloc_rx_ring_info(struct niu *np,
4174                                   struct rx_ring_info *rp)
4175 {
4176         BUILD_BUG_ON(sizeof(struct rxdma_mailbox) != 64);
4177
4178         rp->rxhash = kzalloc(MAX_RBR_RING_SIZE * sizeof(struct page *),
4179                              GFP_KERNEL);
4180         if (!rp->rxhash)
4181                 return -ENOMEM;
4182
4183         rp->mbox = np->ops->alloc_coherent(np->device,
4184                                            sizeof(struct rxdma_mailbox),
4185                                            &rp->mbox_dma, GFP_KERNEL);
4186         if (!rp->mbox)
4187                 return -ENOMEM;
4188         if ((unsigned long)rp->mbox & (64UL - 1)) {
4189                 dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
4190                         "RXDMA mailbox %p\n", np->dev->name, rp->mbox);
4191                 return -EINVAL;
4192         }
4193
4194         rp->rcr = np->ops->alloc_coherent(np->device,
4195                                           MAX_RCR_RING_SIZE * sizeof(__le64),
4196                                           &rp->rcr_dma, GFP_KERNEL);
4197         if (!rp->rcr)
4198                 return -ENOMEM;
4199         if ((unsigned long)rp->rcr & (64UL - 1)) {
4200                 dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
4201                         "RXDMA RCR table %p\n", np->dev->name, rp->rcr);
4202                 return -EINVAL;
4203         }
4204         rp->rcr_table_size = MAX_RCR_RING_SIZE;
4205         rp->rcr_index = 0;
4206
4207         rp->rbr = np->ops->alloc_coherent(np->device,
4208                                           MAX_RBR_RING_SIZE * sizeof(__le32),
4209                                           &rp->rbr_dma, GFP_KERNEL);
4210         if (!rp->rbr)
4211                 return -ENOMEM;
4212         if ((unsigned long)rp->rbr & (64UL - 1)) {
4213                 dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
4214                         "RXDMA RBR table %p\n", np->dev->name, rp->rbr);
4215                 return -EINVAL;
4216         }
4217         rp->rbr_table_size = MAX_RBR_RING_SIZE;
4218         rp->rbr_index = 0;
4219         rp->rbr_pending = 0;
4220
4221         return 0;
4222 }
4223
4224 static void niu_set_max_burst(struct niu *np, struct tx_ring_info *rp)
4225 {
4226         int mtu = np->dev->mtu;
4227
4228         /* These values are recommended by the HW designers for fair
4229          * utilization of DRR amongst the rings.
4230          */
4231         rp->max_burst = mtu + 32;
4232         if (rp->max_burst > 4096)
4233                 rp->max_burst = 4096;
4234 }
4235
4236 static int niu_alloc_tx_ring_info(struct niu *np,
4237                                   struct tx_ring_info *rp)
4238 {
4239         BUILD_BUG_ON(sizeof(struct txdma_mailbox) != 64);
4240
4241         rp->mbox = np->ops->alloc_coherent(np->device,
4242                                            sizeof(struct txdma_mailbox),
4243                                            &rp->mbox_dma, GFP_KERNEL);
4244         if (!rp->mbox)
4245                 return -ENOMEM;
4246         if ((unsigned long)rp->mbox & (64UL - 1)) {
4247                 dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
4248                         "TXDMA mailbox %p\n", np->dev->name, rp->mbox);
4249                 return -EINVAL;
4250         }
4251
4252         rp->descr = np->ops->alloc_coherent(np->device,
4253                                             MAX_TX_RING_SIZE * sizeof(__le64),
4254                                             &rp->descr_dma, GFP_KERNEL);
4255         if (!rp->descr)
4256                 return -ENOMEM;
4257         if ((unsigned long)rp->descr & (64UL - 1)) {
4258                 dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
4259                         "TXDMA descr table %p\n", np->dev->name, rp->descr);
4260                 return -EINVAL;
4261         }
4262
4263         rp->pending = MAX_TX_RING_SIZE;
4264         rp->prod = 0;
4265         rp->cons = 0;
4266         rp->wrap_bit = 0;
4267
4268         /* XXX make these configurable... XXX */
4269         rp->mark_freq = rp->pending / 4;
4270
4271         niu_set_max_burst(np, rp);
4272
4273         return 0;
4274 }
4275
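/* Pick the RBR block size (one page, capped at 32KB) and the four RX
 * buffer sizes the chip may choose from; buffer size 2 is enlarged when
 * the MTU exceeds the standard Ethernet payload size.
 */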
4276 static void niu_size_rbr(struct niu *np, struct rx_ring_info *rp)
4277 {
4278         u16 bss;
4279
4280         bss = min(PAGE_SHIFT, 15);
4281
4282         rp->rbr_block_size = 1 << bss;
4283         rp->rbr_blocks_per_page = 1 << (PAGE_SHIFT-bss);
4284
4285         rp->rbr_sizes[0] = 256;
4286         rp->rbr_sizes[1] = 1024;
4287         if (np->dev->mtu > ETH_DATA_LEN) {
4288                 switch (PAGE_SIZE) {
4289                 case 4 * 1024:
4290                         rp->rbr_sizes[2] = 4096;
4291                         break;
4292
4293                 default:
4294                         rp->rbr_sizes[2] = 8192;
4295                         break;
4296                 }
4297         } else {
4298                 rp->rbr_sizes[2] = 2048;
4299         }
4300         rp->rbr_sizes[3] = rp->rbr_block_size;
4301 }
4302
4303 static int niu_alloc_channels(struct niu *np)
4304 {
4305         struct niu_parent *parent = np->parent;
4306         int first_rx_channel, first_tx_channel;
4307         int i, port, err;
4308
4309         port = np->port;
4310         first_rx_channel = first_tx_channel = 0;
4311         for (i = 0; i < port; i++) {
4312                 first_rx_channel += parent->rxchan_per_port[i];
4313                 first_tx_channel += parent->txchan_per_port[i];
4314         }
4315
4316         np->num_rx_rings = parent->rxchan_per_port[port];
4317         np->num_tx_rings = parent->txchan_per_port[port];
4318
4319         np->dev->real_num_tx_queues = np->num_tx_rings;
4320
4321         np->rx_rings = kzalloc(np->num_rx_rings * sizeof(struct rx_ring_info),
4322                                GFP_KERNEL);
4323         err = -ENOMEM;
4324         if (!np->rx_rings)
4325                 goto out_err;
4326
4327         for (i = 0; i < np->num_rx_rings; i++) {
4328                 struct rx_ring_info *rp = &np->rx_rings[i];
4329
4330                 rp->np = np;
4331                 rp->rx_channel = first_rx_channel + i;
4332
4333                 err = niu_alloc_rx_ring_info(np, rp);
4334                 if (err)
4335                         goto out_err;
4336
4337                 niu_size_rbr(np, rp);
4338
4339                 /* XXX better defaults, configurable, etc... XXX */
4340                 rp->nonsyn_window = 64;
4341                 rp->nonsyn_threshold = rp->rcr_table_size - 64;
4342                 rp->syn_window = 64;
4343                 rp->syn_threshold = rp->rcr_table_size - 64;
4344                 rp->rcr_pkt_threshold = 16;
4345                 rp->rcr_timeout = 8;
4346                 rp->rbr_kick_thresh = RBR_REFILL_MIN;
4347                 if (rp->rbr_kick_thresh < rp->rbr_blocks_per_page)
4348                         rp->rbr_kick_thresh = rp->rbr_blocks_per_page;
4349
4350                 err = niu_rbr_fill(np, rp, GFP_KERNEL);
4351                 if (err)
4352                         goto out_err;
4353         }
4354
4355         np->tx_rings = kzalloc(np->num_tx_rings * sizeof(struct tx_ring_info),
4356                                GFP_KERNEL);
4357         err = -ENOMEM;
4358         if (!np->tx_rings)
4359                 goto out_err;
4360
4361         for (i = 0; i < np->num_tx_rings; i++) {
4362                 struct tx_ring_info *rp = &np->tx_rings[i];
4363
4364                 rp->np = np;
4365                 rp->tx_channel = first_tx_channel + i;
4366
4367                 err = niu_alloc_tx_ring_info(np, rp);
4368                 if (err)
4369                         goto out_err;
4370         }
4371
4372         return 0;
4373
4374 out_err:
4375         niu_free_channels(np);
4376         return err;
4377 }
4378
4379 static int niu_tx_cs_sng_poll(struct niu *np, int channel)
4380 {
4381         int limit = 1000;
4382
4383         while (--limit > 0) {
4384                 u64 val = nr64(TX_CS(channel));
4385                 if (val & TX_CS_SNG_STATE)
4386                         return 0;
4387         }
4388         return -ENODEV;
4389 }
4390
4391 static int niu_tx_channel_stop(struct niu *np, int channel)
4392 {
4393         u64 val = nr64(TX_CS(channel));
4394
4395         val |= TX_CS_STOP_N_GO;
4396         nw64(TX_CS(channel), val);
4397
4398         return niu_tx_cs_sng_poll(np, channel);
4399 }
4400
4401 static int niu_tx_cs_reset_poll(struct niu *np, int channel)
4402 {
4403         int limit = 1000;
4404
4405         while (--limit > 0) {
4406                 u64 val = nr64(TX_CS(channel));
4407                 if (!(val & TX_CS_RST))
4408                         return 0;
4409         }
4410         return -ENODEV;
4411 }
4412
4413 static int niu_tx_channel_reset(struct niu *np, int channel)
4414 {
4415         u64 val = nr64(TX_CS(channel));
4416         int err;
4417
4418         val |= TX_CS_RST;
4419         nw64(TX_CS(channel), val);
4420
4421         err = niu_tx_cs_reset_poll(np, channel);
4422         if (!err)
4423                 nw64(TX_RING_KICK(channel), 0);
4424
4425         return err;
4426 }
4427
4428 static int niu_tx_channel_lpage_init(struct niu *np, int channel)
4429 {
4430         u64 val;
4431
4432         nw64(TX_LOG_MASK1(channel), 0);
4433         nw64(TX_LOG_VAL1(channel), 0);
4434         nw64(TX_LOG_MASK2(channel), 0);
4435         nw64(TX_LOG_VAL2(channel), 0);
4436         nw64(TX_LOG_PAGE_RELO1(channel), 0);
4437         nw64(TX_LOG_PAGE_RELO2(channel), 0);
4438         nw64(TX_LOG_PAGE_HDL(channel), 0);
4439
4440         val  = (u64)np->port << TX_LOG_PAGE_VLD_FUNC_SHIFT;
4441         val |= (TX_LOG_PAGE_VLD_PAGE0 | TX_LOG_PAGE_VLD_PAGE1);
4442         nw64(TX_LOG_PAGE_VLD(channel), val);
4443
4444         /* XXX TXDMA 32bit mode? XXX */
4445
4446         return 0;
4447 }
4448
4449 static void niu_txc_enable_port(struct niu *np, int on)
4450 {
4451         unsigned long flags;
4452         u64 val, mask;
4453
4454         niu_lock_parent(np, flags);
4455         val = nr64(TXC_CONTROL);
4456         mask = (u64)1 << np->port;
4457         if (on) {
4458                 val |= TXC_CONTROL_ENABLE | mask;
4459         } else {
4460                 val &= ~mask;
4461                 if ((val & ~TXC_CONTROL_ENABLE) == 0)
4462                         val &= ~TXC_CONTROL_ENABLE;
4463         }
4464         nw64(TXC_CONTROL, val);
4465         niu_unlock_parent(np, flags);
4466 }
4467
4468 static void niu_txc_set_imask(struct niu *np, u64 imask)
4469 {
4470         unsigned long flags;
4471         u64 val;
4472
4473         niu_lock_parent(np, flags);
4474         val = nr64(TXC_INT_MASK);
4475         val &= ~TXC_INT_MASK_VAL(np->port);
4476         val |= (imask << TXC_INT_MASK_VAL_SHIFT(np->port));
        nw64(TXC_INT_MASK, val);        /* commit the updated mask to the chip */
4477         niu_unlock_parent(np, flags);
4478 }
4479
4480 static void niu_txc_port_dma_enable(struct niu *np, int on)
4481 {
4482         u64 val = 0;
4483
4484         if (on) {
4485                 int i;
4486
4487                 for (i = 0; i < np->num_tx_rings; i++)
4488                         val |= (1 << np->tx_rings[i].tx_channel);
4489         }
4490         nw64(TXC_PORT_DMA(np->port), val);
4491 }
4492
4493 static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
4494 {
4495         int err, channel = rp->tx_channel;
4496         u64 val, ring_len;
4497
4498         err = niu_tx_channel_stop(np, channel);
4499         if (err)
4500                 return err;
4501
4502         err = niu_tx_channel_reset(np, channel);
4503         if (err)
4504                 return err;
4505
4506         err = niu_tx_channel_lpage_init(np, channel);
4507         if (err)
4508                 return err;
4509
4510         nw64(TXC_DMA_MAX(channel), rp->max_burst);
4511         nw64(TX_ENT_MSK(channel), 0);
4512
4513         if (rp->descr_dma & ~(TX_RNG_CFIG_STADDR_BASE |
4514                               TX_RNG_CFIG_STADDR)) {
4515                 dev_err(np->device, PFX "%s: TX ring channel %d "
4516                         "DMA addr (%llx) is not aligned.\n",
4517                         np->dev->name, channel,
4518                         (unsigned long long) rp->descr_dma);
4519                 return -EINVAL;
4520         }
4521
4522         /* The length field in TX_RNG_CFIG is measured in 64-byte
4523          * blocks.  rp->pending is the number of TX descriptors in
4524          * our ring, 8 bytes each, so we divide by a further 8 to get
4525          * the value the chip wants.
4526          */
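        /* For example, a ring of 256 descriptors occupies 256 * 8 = 2048
         * bytes, i.e. 2048 / 64 = 32 blocks, which is 256 / 8.
         */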
4527         ring_len = (rp->pending / 8);
4528
4529         val = ((ring_len << TX_RNG_CFIG_LEN_SHIFT) |
4530                rp->descr_dma);
4531         nw64(TX_RNG_CFIG(channel), val);
4532
4533         if (((rp->mbox_dma >> 32) & ~TXDMA_MBH_MBADDR) ||
4534             ((u32)rp->mbox_dma & ~TXDMA_MBL_MBADDR)) {
4535                 dev_err(np->device, PFX "%s: TX ring channel %d "
4536                         "MBOX addr (%llx) has illegal bits.\n",
4537                         np->dev->name, channel,
4538                         (unsigned long long) rp->mbox_dma);
4539                 return -EINVAL;
4540         }
4541         nw64(TXDMA_MBH(channel), rp->mbox_dma >> 32);
4542         nw64(TXDMA_MBL(channel), rp->mbox_dma & TXDMA_MBL_MBADDR);
4543
4544         nw64(TX_CS(channel), 0);
4545
4546         rp->last_pkt_cnt = 0;
4547
4548         return 0;
4549 }
4550
4551 static void niu_init_rdc_groups(struct niu *np)
4552 {
4553         struct niu_rdc_tables *tp = &np->parent->rdc_group_cfg[np->port];
4554         int i, first_table_num = tp->first_table_num;
4555
4556         for (i = 0; i < tp->num_tables; i++) {
4557                 struct rdc_table *tbl = &tp->tables[i];
4558                 int this_table = first_table_num + i;
4559                 int slot;
4560
4561                 for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++)
4562                         nw64(RDC_TBL(this_table, slot),
4563                              tbl->rxdma_channel[slot]);
4564         }
4565
4566         nw64(DEF_RDC(np->port), np->parent->rdc_default[np->port]);
4567 }
4568
4569 static void niu_init_drr_weight(struct niu *np)
4570 {
4571         int type = phy_decode(np->parent->port_phy, np->port);
4572         u64 val;
4573
4574         switch (type) {
4575         case PORT_TYPE_10G:
4576                 val = PT_DRR_WEIGHT_DEFAULT_10G;
4577                 break;
4578
4579         case PORT_TYPE_1G:
4580         default:
4581                 val = PT_DRR_WEIGHT_DEFAULT_1G;
4582                 break;
4583         }
4584         nw64(PT_DRR_WT(np->port), val);
4585 }
4586
4587 static int niu_init_hostinfo(struct niu *np)
4588 {
4589         struct niu_parent *parent = np->parent;
4590         struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
4591         int i, err, num_alt = niu_num_alt_addr(np);
4592         int first_rdc_table = tp->first_table_num;
4593
4594         err = niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
4595         if (err)
4596                 return err;
4597
4598         err = niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
4599         if (err)
4600                 return err;
4601
4602         for (i = 0; i < num_alt; i++) {
4603                 err = niu_set_alt_mac_rdc_table(np, i, first_rdc_table, 1);
4604                 if (err)
4605                         return err;
4606         }
4607
4608         return 0;
4609 }
4610
4611 static int niu_rx_channel_reset(struct niu *np, int channel)
4612 {
4613         return niu_set_and_wait_clear(np, RXDMA_CFIG1(channel),
4614                                       RXDMA_CFIG1_RST, 1000, 10,
4615                                       "RXDMA_CFIG1");
4616 }
4617
4618 static int niu_rx_channel_lpage_init(struct niu *np, int channel)
4619 {
4620         u64 val;
4621
4622         nw64(RX_LOG_MASK1(channel), 0);
4623         nw64(RX_LOG_VAL1(channel), 0);
4624         nw64(RX_LOG_MASK2(channel), 0);
4625         nw64(RX_LOG_VAL2(channel), 0);
4626         nw64(RX_LOG_PAGE_RELO1(channel), 0);
4627         nw64(RX_LOG_PAGE_RELO2(channel), 0);
4628         nw64(RX_LOG_PAGE_HDL(channel), 0);
4629
4630         val  = (u64)np->port << RX_LOG_PAGE_VLD_FUNC_SHIFT;
4631         val |= (RX_LOG_PAGE_VLD_PAGE0 | RX_LOG_PAGE_VLD_PAGE1);
4632         nw64(RX_LOG_PAGE_VLD(channel), val);
4633
4634         return 0;
4635 }
4636
4637 static void niu_rx_channel_wred_init(struct niu *np, struct rx_ring_info *rp)
4638 {
4639         u64 val;
4640
4641         val = (((u64)rp->nonsyn_window << RDC_RED_PARA_WIN_SHIFT) |
4642                ((u64)rp->nonsyn_threshold << RDC_RED_PARA_THRE_SHIFT) |
4643                ((u64)rp->syn_window << RDC_RED_PARA_WIN_SYN_SHIFT) |
4644                ((u64)rp->syn_threshold << RDC_RED_PARA_THRE_SYN_SHIFT));
4645         nw64(RDC_RED_PARA(rp->rx_channel), val);
4646 }
4647
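/* Encode the RBR block size and the three programmable RX buffer sizes
 * into an RBR_CFIG_B register value; combinations the chip cannot
 * represent are rejected with -EINVAL.
 */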
4648 static int niu_compute_rbr_cfig_b(struct rx_ring_info *rp, u64 *ret)
4649 {
4650         u64 val = 0;
4651
4652         switch (rp->rbr_block_size) {
4653         case 4 * 1024:
4654                 val |= (RBR_BLKSIZE_4K << RBR_CFIG_B_BLKSIZE_SHIFT);
4655                 break;
4656         case 8 * 1024:
4657                 val |= (RBR_BLKSIZE_8K << RBR_CFIG_B_BLKSIZE_SHIFT);
4658                 break;
4659         case 16 * 1024:
4660                 val |= (RBR_BLKSIZE_16K << RBR_CFIG_B_BLKSIZE_SHIFT);
4661                 break;
4662         case 32 * 1024:
4663                 val |= (RBR_BLKSIZE_32K << RBR_CFIG_B_BLKSIZE_SHIFT);
4664                 break;
4665         default:
4666                 return -EINVAL;
4667         }
4668         val |= RBR_CFIG_B_VLD2;
4669         switch (rp->rbr_sizes[2]) {
4670         case 2 * 1024:
4671                 val |= (RBR_BUFSZ2_2K << RBR_CFIG_B_BUFSZ2_SHIFT);
4672                 break;
4673         case 4 * 1024:
4674                 val |= (RBR_BUFSZ2_4K << RBR_CFIG_B_BUFSZ2_SHIFT);
4675                 break;
4676         case 8 * 1024:
4677                 val |= (RBR_BUFSZ2_8K << RBR_CFIG_B_BUFSZ2_SHIFT);
4678                 break;
4679         case 16 * 1024:
4680                 val |= (RBR_BUFSZ2_16K << RBR_CFIG_B_BUFSZ2_SHIFT);
4681                 break;
4682
4683         default:
4684                 return -EINVAL;
4685         }
4686         val |= RBR_CFIG_B_VLD1;
4687         switch (rp->rbr_sizes[1]) {
4688         case 1 * 1024:
4689                 val |= (RBR_BUFSZ1_1K << RBR_CFIG_B_BUFSZ1_SHIFT);
4690                 break;
4691         case 2 * 1024:
4692                 val |= (RBR_BUFSZ1_2K << RBR_CFIG_B_BUFSZ1_SHIFT);
4693                 break;
4694         case 4 * 1024:
4695                 val |= (RBR_BUFSZ1_4K << RBR_CFIG_B_BUFSZ1_SHIFT);
4696                 break;
4697         case 8 * 1024:
4698                 val |= (RBR_BUFSZ1_8K << RBR_CFIG_B_BUFSZ1_SHIFT);
4699                 break;
4700
4701         default:
4702                 return -EINVAL;
4703         }
4704         val |= RBR_CFIG_B_VLD0;
4705         switch (rp->rbr_sizes[0]) {
4706         case 256:
4707                 val |= (RBR_BUFSZ0_256 << RBR_CFIG_B_BUFSZ0_SHIFT);
4708                 break;
4709         case 512:
4710                 val |= (RBR_BUFSZ0_512 << RBR_CFIG_B_BUFSZ0_SHIFT);
4711                 break;
4712         case 1 * 1024:
4713                 val |= (RBR_BUFSZ0_1K << RBR_CFIG_B_BUFSZ0_SHIFT);
4714                 break;
4715         case 2 * 1024:
4716                 val |= (RBR_BUFSZ0_2K << RBR_CFIG_B_BUFSZ0_SHIFT);
4717                 break;
4718
4719         default:
4720                 return -EINVAL;
4721         }
4722
4723         *ret = val;
4724         return 0;
4725 }
4726
4727 static int niu_enable_rx_channel(struct niu *np, int channel, int on)
4728 {
4729         u64 val = nr64(RXDMA_CFIG1(channel));
4730         int limit;
4731
4732         if (on)
4733                 val |= RXDMA_CFIG1_EN;
4734         else
4735                 val &= ~RXDMA_CFIG1_EN;
4736         nw64(RXDMA_CFIG1(channel), val);
4737
4738         limit = 1000;
4739         while (--limit > 0) {
4740                 if (nr64(RXDMA_CFIG1(channel)) & RXDMA_CFIG1_QST)
4741                         break;
4742                 udelay(10);
4743         }
4744         if (limit <= 0)
4745                 return -ENODEV;
4746         return 0;
4747 }
4748
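/* Bring up one RX DMA channel: reset it, program its logical pages and
 * WRED parameters, point it at the mailbox, RBR and RCR rings, enable
 * it, and finally kick the RBR with the buffers queued by
 * niu_rbr_fill().
 */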
4749 static int niu_init_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
4750 {
4751         int err, channel = rp->rx_channel;
4752         u64 val;
4753
4754         err = niu_rx_channel_reset(np, channel);
4755         if (err)
4756                 return err;
4757
4758         err = niu_rx_channel_lpage_init(np, channel);
4759         if (err)
4760                 return err;
4761
4762         niu_rx_channel_wred_init(np, rp);
4763
4764         nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_RBR_EMPTY);
4765         nw64(RX_DMA_CTL_STAT(channel),
4766              (RX_DMA_CTL_STAT_MEX |
4767               RX_DMA_CTL_STAT_RCRTHRES |
4768               RX_DMA_CTL_STAT_RCRTO |
4769               RX_DMA_CTL_STAT_RBR_EMPTY));
4770         nw64(RXDMA_CFIG1(channel), rp->mbox_dma >> 32);
4771         nw64(RXDMA_CFIG2(channel), (rp->mbox_dma & 0x00000000ffffffc0));
4772         nw64(RBR_CFIG_A(channel),
4773              ((u64)rp->rbr_table_size << RBR_CFIG_A_LEN_SHIFT) |
4774              (rp->rbr_dma & (RBR_CFIG_A_STADDR_BASE | RBR_CFIG_A_STADDR)));
4775         err = niu_compute_rbr_cfig_b(rp, &val);
4776         if (err)
4777                 return err;
4778         nw64(RBR_CFIG_B(channel), val);
4779         nw64(RCRCFIG_A(channel),
4780              ((u64)rp->rcr_table_size << RCRCFIG_A_LEN_SHIFT) |
4781              (rp->rcr_dma & (RCRCFIG_A_STADDR_BASE | RCRCFIG_A_STADDR)));
4782         nw64(RCRCFIG_B(channel),
4783              ((u64)rp->rcr_pkt_threshold << RCRCFIG_B_PTHRES_SHIFT) |
4784              RCRCFIG_B_ENTOUT |
4785              ((u64)rp->rcr_timeout << RCRCFIG_B_TIMEOUT_SHIFT));
4786
4787         err = niu_enable_rx_channel(np, channel, 1);
4788         if (err)
4789                 return err;
4790
4791         nw64(RBR_KICK(channel), rp->rbr_index);
4792
4793         val = nr64(RX_DMA_CTL_STAT(channel));
4794         val |= RX_DMA_CTL_STAT_RBR_EMPTY;
4795         nw64(RX_DMA_CTL_STAT(channel), val);
4796
4797         return 0;
4798 }
4799
4800 static int niu_init_rx_channels(struct niu *np)
4801 {
4802         unsigned long flags;
4803         u64 seed = jiffies_64;
4804         int err, i;
4805
4806         niu_lock_parent(np, flags);
4807         nw64(RX_DMA_CK_DIV, np->parent->rxdma_clock_divider);
4808         nw64(RED_RAN_INIT, RED_RAN_INIT_OPMODE | (seed & RED_RAN_INIT_VAL));
4809         niu_unlock_parent(np, flags);
4810
4811         /* XXX RXDMA 32bit mode? XXX */
4812
4813         niu_init_rdc_groups(np);
4814         niu_init_drr_weight(np);
4815
4816         err = niu_init_hostinfo(np);
4817         if (err)
4818                 return err;
4819
4820         for (i = 0; i < np->num_rx_rings; i++) {
4821                 struct rx_ring_info *rp = &np->rx_rings[i];
4822
4823                 err = niu_init_one_rx_channel(np, rp);
4824                 if (err)
4825                         return err;
4826         }
4827
4828         return 0;
4829 }
4830
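/* Install a TCAM entry matching packets that carry no L4 port
 * information (the noport key bit), e.g. IP fragments, and steer them
 * using a zero translation table offset.
 */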
4831 static int niu_set_ip_frag_rule(struct niu *np)
4832 {
4833         struct niu_parent *parent = np->parent;
4834         struct niu_classifier *cp = &np->clas;
4835         struct niu_tcam_entry *tp;
4836         int index, err;
4837
4838         /* XXX fix this allocation scheme XXX */
4839         index = cp->tcam_index;
4840         tp = &parent->tcam[index];
4841
4842         /* Note that the noport bit is the same in both ipv4 and
4843          * ipv6 format TCAM entries.
4844          */
4845         memset(tp, 0, sizeof(*tp));
4846         tp->key[1] = TCAM_V4KEY1_NOPORT;
4847         tp->key_mask[1] = TCAM_V4KEY1_NOPORT;
4848         tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET |
4849                           ((u64)0 << TCAM_ASSOCDATA_OFFSET_SHIFT));
4850         err = tcam_write(np, index, tp->key, tp->key_mask);
4851         if (err)
4852                 return err;
4853         err = tcam_assoc_write(np, index, tp->assoc_data);
4854         if (err)
4855                 return err;
4856
4857         return 0;
4858 }
4859
4860 static int niu_init_classifier_hw(struct niu *np)
4861 {
4862         struct niu_parent *parent = np->parent;
4863         struct niu_classifier *cp = &np->clas;
4864         int i, err;
4865
4866         nw64(H1POLY, cp->h1_init);
4867         nw64(H2POLY, cp->h2_init);
4868
4869         err = niu_init_hostinfo(np);
4870         if (err)
4871                 return err;
4872
4873         for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++) {
4874                 struct niu_vlan_rdc *vp = &cp->vlan_mappings[i];
4875
4876                 vlan_tbl_write(np, i, np->port,
4877                                vp->vlan_pref, vp->rdc_num);
4878         }
4879
4880         for (i = 0; i < cp->num_alt_mac_mappings; i++) {
4881                 struct niu_altmac_rdc *ap = &cp->alt_mac_mappings[i];
4882
4883                 err = niu_set_alt_mac_rdc_table(np, ap->alt_mac_num,
4884                                                 ap->rdc_num, ap->mac_pref);
4885                 if (err)
4886                         return err;
4887         }
4888
4889         for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) {
4890                 int index = i - CLASS_CODE_USER_PROG1;
4891
4892                 err = niu_set_tcam_key(np, i, parent->tcam_key[index]);
4893                 if (err)
4894                         return err;
4895                 err = niu_set_flow_key(np, i, parent->flow_key[index]);
4896                 if (err)
4897                         return err;
4898         }
4899
4900         err = niu_set_ip_frag_rule(np);
4901         if (err)
4902                 return err;
4903
4904         tcam_enable(np, 1);
4905
4906         return 0;
4907 }
4908
4909 static int niu_zcp_write(struct niu *np, int index, u64 *data)
4910 {
4911         nw64(ZCP_RAM_DATA0, data[0]);
4912         nw64(ZCP_RAM_DATA1, data[1]);
4913         nw64(ZCP_RAM_DATA2, data[2]);
4914         nw64(ZCP_RAM_DATA3, data[3]);
4915         nw64(ZCP_RAM_DATA4, data[4]);
4916         nw64(ZCP_RAM_BE, ZCP_RAM_BE_VAL);
4917         nw64(ZCP_RAM_ACC,
4918              (ZCP_RAM_ACC_WRITE |
4919               (0 << ZCP_RAM_ACC_ZFCID_SHIFT) |
4920               (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));
4921
4922         return niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
4923                                    1000, 100);
4924 }
4925
4926 static int niu_zcp_read(struct niu *np, int index, u64 *data)
4927 {
4928         int err;
4929
4930         err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
4931                                   1000, 100);
4932         if (err) {
4933                 dev_err(np->device, PFX "%s: ZCP read busy won't clear, "
4934                         "ZCP_RAM_ACC[%llx]\n", np->dev->name,
4935                         (unsigned long long) nr64(ZCP_RAM_ACC));
4936                 return err;
4937         }
4938
4939         nw64(ZCP_RAM_ACC,
4940              (ZCP_RAM_ACC_READ |
4941               (0 << ZCP_RAM_ACC_ZFCID_SHIFT) |
4942               (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));
4943
4944         err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
4945                                   1000, 100);
4946         if (err) {
4947                 dev_err(np->device, PFX "%s: ZCP read busy2 won't clear, "
4948                         "ZCP_RAM_ACC[%llx]\n", np->dev->name,
4949                         (unsigned long long) nr64(ZCP_RAM_ACC));
4950                 return err;
4951         }
4952
4953         data[0] = nr64(ZCP_RAM_DATA0);
4954         data[1] = nr64(ZCP_RAM_DATA1);
4955         data[2] = nr64(ZCP_RAM_DATA2);
4956         data[3] = nr64(ZCP_RAM_DATA3);
4957         data[4] = nr64(ZCP_RAM_DATA4);
4958
4959         return 0;
4960 }
4961
4962 static void niu_zcp_cfifo_reset(struct niu *np)
4963 {
4964         u64 val = nr64(RESET_CFIFO);
4965
4966         val |= RESET_CFIFO_RST(np->port);
4967         nw64(RESET_CFIFO, val);
4968         udelay(10);
4969
4970         val &= ~RESET_CFIFO_RST(np->port);
4971         nw64(RESET_CFIFO, val);
4972 }
4973
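/* Scrub the per-port CFIFO by writing zeros to every entry and reading
 * them back (the entry count differs between Atlas and NIU parts), then
 * reset the CFIFO, clear the ECC and ZCP interrupt state and program
 * the interrupt mask.
 */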
4974 static int niu_init_zcp(struct niu *np)
4975 {
4976         u64 data[5], rbuf[5];
4977         int i, max, err;
4978
4979         if (np->parent->plat_type != PLAT_TYPE_NIU) {
4980                 if (np->port == 0 || np->port == 1)
4981                         max = ATLAS_P0_P1_CFIFO_ENTRIES;
4982                 else
4983                         max = ATLAS_P2_P3_CFIFO_ENTRIES;
4984         } else
4985                 max = NIU_CFIFO_ENTRIES;
4986
4987         data[0] = 0;
4988         data[1] = 0;
4989         data[2] = 0;
4990         data[3] = 0;
4991         data[4] = 0;
4992
4993         for (i = 0; i < max; i++) {
4994                 err = niu_zcp_write(np, i, data);
4995                 if (err)
4996                         return err;
4997                 err = niu_zcp_read(np, i, rbuf);
4998                 if (err)
4999                         return err;
5000         }
5001
5002         niu_zcp_cfifo_reset(np);
5003         nw64(CFIFO_ECC(np->port), 0);
5004         nw64(ZCP_INT_STAT, ZCP_INT_STAT_ALL);
5005         (void) nr64(ZCP_INT_STAT);
5006         nw64(ZCP_INT_MASK, ZCP_INT_MASK_ALL);
5007
5008         return 0;
5009 }
5010
5011 static void niu_ipp_write(struct niu *np, int index, u64 *data)
5012 {
5013         u64 val = nr64_ipp(IPP_CFIG);
5014
5015         nw64_ipp(IPP_CFIG, val | IPP_CFIG_DFIFO_PIO_W);
5016         nw64_ipp(IPP_DFIFO_WR_PTR, index);
5017         nw64_ipp(IPP_DFIFO_WR0, data[0]);
5018         nw64_ipp(IPP_DFIFO_WR1, data[1]);
5019         nw64_ipp(IPP_DFIFO_WR2, data[2]);
5020         nw64_ipp(IPP_DFIFO_WR3, data[3]);
5021         nw64_ipp(IPP_DFIFO_WR4, data[4]);
5022         nw64_ipp(IPP_CFIG, val & ~IPP_CFIG_DFIFO_PIO_W);
5023 }
5024
5025 static void niu_ipp_read(struct niu *np, int index, u64 *data)
5026 {
5027         nw64_ipp(IPP_DFIFO_RD_PTR, index);
5028         data[0] = nr64_ipp(IPP_DFIFO_RD0);
5029         data[1] = nr64_ipp(IPP_DFIFO_RD1);
5030         data[2] = nr64_ipp(IPP_DFIFO_RD2);
5031         data[3] = nr64_ipp(IPP_DFIFO_RD3);
5032         data[4] = nr64_ipp(IPP_DFIFO_RD4);
5033 }
5034
5035 static int niu_ipp_reset(struct niu *np)
5036 {
5037         return niu_set_and_wait_clear_ipp(np, IPP_CFIG, IPP_CFIG_SOFT_RST,
5038                                           1000, 100, "IPP_CFIG");
5039 }
5040
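/* Zero the IPP DFIFO, soft-reset the block, then enable the IPP with
 * DFIFO ECC, bad-CRC dropping and checksum support and a 0x1ffff
 * maximum packet size.
 */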
5041 static int niu_init_ipp(struct niu *np)
5042 {
5043         u64 data[5], rbuf[5], val;
5044         int i, max, err;
5045
5046         if (np->parent->plat_type != PLAT_TYPE_NIU) {
5047                 if (np->port == 0 || np->port == 1)
5048                         max = ATLAS_P0_P1_DFIFO_ENTRIES;
5049                 else
5050                         max = ATLAS_P2_P3_DFIFO_ENTRIES;
5051         } else
5052                 max = NIU_DFIFO_ENTRIES;
5053
5054         data[0] = 0;
5055         data[1] = 0;
5056         data[2] = 0;
5057         data[3] = 0;
5058         data[4] = 0;
5059
5060         for (i = 0; i < max; i++) {
5061                 niu_ipp_write(np, i, data);
5062                 niu_ipp_read(np, i, rbuf);
5063         }
5064
5065         (void) nr64_ipp(IPP_INT_STAT);
5066         (void) nr64_ipp(IPP_INT_STAT);
5067
5068         err = niu_ipp_reset(np);
5069         if (err)
5070                 return err;
5071
5072         (void) nr64_ipp(IPP_PKT_DIS);
5073         (void) nr64_ipp(IPP_BAD_CS_CNT);
5074         (void) nr64_ipp(IPP_ECC);
5075
5076         (void) nr64_ipp(IPP_INT_STAT);
5077
5078         nw64_ipp(IPP_MSK, ~IPP_MSK_ALL);
5079
5080         val = nr64_ipp(IPP_CFIG);
5081         val &= ~IPP_CFIG_IP_MAX_PKT;
5082         val |= (IPP_CFIG_IPP_ENABLE |
5083                 IPP_CFIG_DFIFO_ECC_EN |
5084                 IPP_CFIG_DROP_BAD_CRC |
5085                 IPP_CFIG_CKSUM_EN |
5086                 (0x1ffff << IPP_CFIG_IP_MAX_PKT_SHIFT));
5087         nw64_ipp(IPP_CFIG, val);
5088
5089         return 0;
5090 }
5091
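/* Only 10G fiber ports have their LED managed here: a non-zero status
 * selects the LED polarity bit, otherwise the LED is forced on.
 */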
5092 static void niu_handle_led(struct niu *np, int status)
5093 {
5094         u64 val;
5095         val = nr64_mac(XMAC_CONFIG);
5096
5097         if ((np->flags & NIU_FLAGS_10G) != 0 &&
5098             (np->flags & NIU_FLAGS_FIBER) != 0) {
5099                 if (status) {
5100                         val |= XMAC_CONFIG_LED_POLARITY;
5101                         val &= ~XMAC_CONFIG_FORCE_LED_ON;
5102                 } else {
5103                         val |= XMAC_CONFIG_FORCE_LED_ON;
5104                         val &= ~XMAC_CONFIG_LED_POLARITY;
5105                 }
5106         }
5107
5108         nw64_mac(XMAC_CONFIG, val);
5109 }
5110
5111 static void niu_init_xif_xmac(struct niu *np)
5112 {
5113         struct niu_link_config *lp = &np->link_config;
5114         u64 val;
5115
5116         if (np->flags & NIU_FLAGS_XCVR_SERDES) {
5117                 val = nr64(MIF_CONFIG);
5118                 val |= MIF_CONFIG_ATCA_GE;
5119                 nw64(MIF_CONFIG, val);
5120         }
5121
5122         val = nr64_mac(XMAC_CONFIG);
5123         val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;
5124
5125         val |= XMAC_CONFIG_TX_OUTPUT_EN;
5126
5127         if (lp->loopback_mode == LOOPBACK_MAC) {
5128                 val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;
5129                 val |= XMAC_CONFIG_LOOPBACK;
5130         } else {
5131                 val &= ~XMAC_CONFIG_LOOPBACK;
5132         }
5133
5134         if (np->flags & NIU_FLAGS_10G) {
5135                 val &= ~XMAC_CONFIG_LFS_DISABLE;
5136         } else {
5137                 val |= XMAC_CONFIG_LFS_DISABLE;
5138                 if (!(np->flags & NIU_FLAGS_FIBER) &&
5139                     !(np->flags & NIU_FLAGS_XCVR_SERDES))
5140                         val |= XMAC_CONFIG_1G_PCS_BYPASS;
5141                 else
5142                         val &= ~XMAC_CONFIG_1G_PCS_BYPASS;
5143         }
5144
5145         val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;
5146
5147         if (lp->active_speed == SPEED_100)
5148                 val |= XMAC_CONFIG_SEL_CLK_25MHZ;
5149         else
5150                 val &= ~XMAC_CONFIG_SEL_CLK_25MHZ;
5151
5152         nw64_mac(XMAC_CONFIG, val);
5153
5154         val = nr64_mac(XMAC_CONFIG);
5155         val &= ~XMAC_CONFIG_MODE_MASK;
5156         if (np->flags & NIU_FLAGS_10G) {
5157                 val |= XMAC_CONFIG_MODE_XGMII;
5158         } else {
5159                 if (lp->active_speed == SPEED_100)
5160                         val |= XMAC_CONFIG_MODE_MII;
5161                 else
5162                         val |= XMAC_CONFIG_MODE_GMII;
5163         }
5164
5165         nw64_mac(XMAC_CONFIG, val);
5166 }
5167
5168 static void niu_init_xif_bmac(struct niu *np)
5169 {
5170         struct niu_link_config *lp = &np->link_config;
5171         u64 val;
5172
5173         val = BMAC_XIF_CONFIG_TX_OUTPUT_EN;
5174
5175         if (lp->loopback_mode == LOOPBACK_MAC)
5176                 val |= BMAC_XIF_CONFIG_MII_LOOPBACK;
5177         else
5178                 val &= ~BMAC_XIF_CONFIG_MII_LOOPBACK;
5179
5180         if (lp->active_speed == SPEED_1000)
5181                 val |= BMAC_XIF_CONFIG_GMII_MODE;
5182         else
5183                 val &= ~BMAC_XIF_CONFIG_GMII_MODE;
5184
5185         val &= ~(BMAC_XIF_CONFIG_LINK_LED |
5186                  BMAC_XIF_CONFIG_LED_POLARITY);
5187
5188         if (!(np->flags & NIU_FLAGS_10G) &&
5189             !(np->flags & NIU_FLAGS_FIBER) &&
5190             lp->active_speed == SPEED_100)
5191                 val |= BMAC_XIF_CONFIG_25MHZ_CLOCK;
5192         else
5193                 val &= ~BMAC_XIF_CONFIG_25MHZ_CLOCK;
5194
5195         nw64_mac(BMAC_XIF_CONFIG, val);
5196 }
5197
5198 static void niu_init_xif(struct niu *np)
5199 {
5200         if (np->flags & NIU_FLAGS_XMAC)
5201                 niu_init_xif_xmac(np);
5202         else
5203                 niu_init_xif_bmac(np);
5204 }
5205
5206 static void niu_pcs_mii_reset(struct niu *np)
5207 {
5208         int limit = 1000;
5209         u64 val = nr64_pcs(PCS_MII_CTL);
5210         val |= PCS_MII_CTL_RST;
5211         nw64_pcs(PCS_MII_CTL, val);
5212         while ((--limit >= 0) && (val & PCS_MII_CTL_RST)) {
5213                 udelay(100);
5214                 val = nr64_pcs(PCS_MII_CTL);
5215         }
5216 }
5217
5218 static void niu_xpcs_reset(struct niu *np)
5219 {
5220         int limit = 1000;
5221         u64 val = nr64_xpcs(XPCS_CONTROL1);
5222         val |= XPCS_CONTROL1_RESET;
5223         nw64_xpcs(XPCS_CONTROL1, val);
5224         while ((--limit >= 0) && (val & XPCS_CONTROL1_RESET)) {
5225                 udelay(100);
5226                 val = nr64_xpcs(XPCS_CONTROL1);
5227         }
5228 }
5229
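/* Program the PCS/XPCS block for the port type: 1G fiber and 1G SERDES
 * use the on-chip PCS, the 10G variants go through the XPCS (XMAC only,
 * with optional PHY loopback), and 1G copper / 1G RGMII fiber use the
 * MII datapath; unsupported flag combinations are rejected.
 */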
5230 static int niu_init_pcs(struct niu *np)
5231 {
5232         struct niu_link_config *lp = &np->link_config;
5233         u64 val;
5234
5235         switch (np->flags & (NIU_FLAGS_10G |
5236                              NIU_FLAGS_FIBER |
5237                              NIU_FLAGS_XCVR_SERDES)) {
5238         case NIU_FLAGS_FIBER:
5239                 /* 1G fiber */
5240                 nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
5241                 nw64_pcs(PCS_DPATH_MODE, 0);
5242                 niu_pcs_mii_reset(np);
5243                 break;
5244
5245         case NIU_FLAGS_10G:
5246         case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
5247         case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
5248                 /* 10G SERDES */
5249                 if (!(np->flags & NIU_FLAGS_XMAC))
5250                         return -EINVAL;
5251
5252                 /* 10G copper or fiber */
5253                 val = nr64_mac(XMAC_CONFIG);
5254                 val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;
5255                 nw64_mac(XMAC_CONFIG, val);
5256
5257                 niu_xpcs_reset(np);
5258
5259                 val = nr64_xpcs(XPCS_CONTROL1);
5260                 if (lp->loopback_mode == LOOPBACK_PHY)
5261                         val |= XPCS_CONTROL1_LOOPBACK;
5262                 else
5263                         val &= ~XPCS_CONTROL1_LOOPBACK;
5264                 nw64_xpcs(XPCS_CONTROL1, val);
5265
5266                 nw64_xpcs(XPCS_DESKEW_ERR_CNT, 0);
5267                 (void) nr64_xpcs(XPCS_SYMERR_CNT01);
5268                 (void) nr64_xpcs(XPCS_SYMERR_CNT23);
5269                 break;
5270
5271
5272         case NIU_FLAGS_XCVR_SERDES:
5273                 /* 1G SERDES */
5274                 niu_pcs_mii_reset(np);
5275                 nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
5276                 nw64_pcs(PCS_DPATH_MODE, 0);
5277                 break;
5278
5279         case 0:
5280                 /* 1G copper */
5281         case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
5282                 /* 1G RGMII FIBER */
5283                 nw64_pcs(PCS_DPATH_MODE, PCS_DPATH_MODE_MII);
5284                 niu_pcs_mii_reset(np);
5285                 break;
5286
5287         default:
5288                 return -EINVAL;
5289         }
5290
5291         return 0;
5292 }
5293
5294 static int niu_reset_tx_xmac(struct niu *np)
5295 {
5296         return niu_set_and_wait_clear_mac(np, XTXMAC_SW_RST,
5297                                           (XTXMAC_SW_RST_REG_RS |
5298                                            XTXMAC_SW_RST_SOFT_RST),
5299                                           1000, 100, "XTXMAC_SW_RST");
5300 }
5301
5302 static int niu_reset_tx_bmac(struct niu *np)
5303 {
5304         int limit;
5305
5306         nw64_mac(BTXMAC_SW_RST, BTXMAC_SW_RST_RESET);
5307         limit = 1000;
5308         while (--limit >= 0) {
5309                 if (!(nr64_mac(BTXMAC_SW_RST) & BTXMAC_SW_RST_RESET))
5310                         break;
5311                 udelay(100);
5312         }
5313         if (limit < 0) {
5314                 dev_err(np->device, PFX "Port %u TX BMAC would not reset, "
5315                         "BTXMAC_SW_RST[%llx]\n",
5316                         np->port,
5317                         (unsigned long long) nr64_mac(BTXMAC_SW_RST));
5318                 return -ENODEV;
5319         }
5320
5321         return 0;
5322 }
5323
5324 static int niu_reset_tx_mac(struct niu *np)
5325 {
5326         if (np->flags & NIU_FLAGS_XMAC)
5327                 return niu_reset_tx_xmac(np);
5328         else
5329                 return niu_reset_tx_bmac(np);
5330 }
5331
5332 static void niu_init_tx_xmac(struct niu *np, u64 min, u64 max)
5333 {
5334         u64 val;
5335
5336         val = nr64_mac(XMAC_MIN);
5337         val &= ~(XMAC_MIN_TX_MIN_PKT_SIZE |
5338                  XMAC_MIN_RX_MIN_PKT_SIZE);
5339         val |= (min << XMAC_MIN_RX_MIN_PKT_SIZE_SHFT);
5340         val |= (min << XMAC_MIN_TX_MIN_PKT_SIZE_SHFT);
5341         nw64_mac(XMAC_MIN, val);
5342
5343         nw64_mac(XMAC_MAX, max);
5344
5345         nw64_mac(XTXMAC_STAT_MSK, ~(u64)0);
5346
5347         val = nr64_mac(XMAC_IPG);
5348         if (np->flags & NIU_FLAGS_10G) {
5349                 val &= ~XMAC_IPG_IPG_XGMII;
5350                 val |= (IPG_12_15_XGMII << XMAC_IPG_IPG_XGMII_SHIFT);
5351         } else {
5352                 val &= ~XMAC_IPG_IPG_MII_GMII;
5353                 val |= (IPG_12_MII_GMII << XMAC_IPG_IPG_MII_GMII_SHIFT);
5354         }
5355         nw64_mac(XMAC_IPG, val);
5356
5357         val = nr64_mac(XMAC_CONFIG);
5358         val &= ~(XMAC_CONFIG_ALWAYS_NO_CRC |
5359                  XMAC_CONFIG_STRETCH_MODE |
5360                  XMAC_CONFIG_VAR_MIN_IPG_EN |
5361                  XMAC_CONFIG_TX_ENABLE);
5362         nw64_mac(XMAC_CONFIG, val);
5363
5364         nw64_mac(TXMAC_FRM_CNT, 0);
5365         nw64_mac(TXMAC_BYTE_CNT, 0);
5366 }
5367
5368 static void niu_init_tx_bmac(struct niu *np, u64 min, u64 max)
5369 {
5370         u64 val;
5371
5372         nw64_mac(BMAC_MIN_FRAME, min);
5373         nw64_mac(BMAC_MAX_FRAME, max);
5374
5375         nw64_mac(BTXMAC_STATUS_MASK, ~(u64)0);
5376         nw64_mac(BMAC_CTRL_TYPE, 0x8808);
5377         nw64_mac(BMAC_PREAMBLE_SIZE, 7);
5378
5379         val = nr64_mac(BTXMAC_CONFIG);
5380         val &= ~(BTXMAC_CONFIG_FCS_DISABLE |
5381                  BTXMAC_CONFIG_ENABLE);
5382         nw64_mac(BTXMAC_CONFIG, val);
5383 }
5384
5385 static void niu_init_tx_mac(struct niu *np)
5386 {
5387         u64 min, max;
5388
5389         min = 64;
5390         if (np->dev->mtu > ETH_DATA_LEN)
5391                 max = 9216;
5392         else
5393                 max = 1522;
5394
5395         /* The XMAC_MIN register only accepts values for TX min which
5396          * have the low 3 bits cleared.
5397          */
5398         BUILD_BUG_ON(min & 0x7);
5399
5400         if (np->flags & NIU_FLAGS_XMAC)
5401                 niu_init_tx_xmac(np, min, max);
5402         else
5403                 niu_init_tx_bmac(np, min, max);
5404 }
5405
5406 static int niu_reset_rx_xmac(struct niu *np)
5407 {
5408         int limit;
5409
5410         nw64_mac(XRXMAC_SW_RST,
5411                  XRXMAC_SW_RST_REG_RS | XRXMAC_SW_RST_SOFT_RST);
5412         limit = 1000;
5413         while (--limit >= 0) {
5414                 if (!(nr64_mac(XRXMAC_SW_RST) & (XRXMAC_SW_RST_REG_RS |
5415                                                  XRXMAC_SW_RST_SOFT_RST)))
5416                         break;
5417                 udelay(100);
5418         }
5419         if (limit < 0) {
5420                 dev_err(np->device, PFX "Port %u RX XMAC would not reset, "
5421                         "XRXMAC_SW_RST[%llx]\n",
5422                         np->port,
5423                         (unsigned long long) nr64_mac(XRXMAC_SW_RST));
5424                 return -ENODEV;
5425         }
5426
5427         return 0;
5428 }
5429
5430 static int niu_reset_rx_bmac(struct niu *np)
5431 {
5432         int limit;
5433
5434         nw64_mac(BRXMAC_SW_RST, BRXMAC_SW_RST_RESET);
5435         limit = 1000;
5436         while (--limit >= 0) {
5437                 if (!(nr64_mac(BRXMAC_SW_RST) & BRXMAC_SW_RST_RESET))
5438                         break;
5439                 udelay(100);
5440         }
5441         if (limit < 0) {
5442                 dev_err(np->device, PFX "Port %u RX BMAC would not reset, "
5443                         "BRXMAC_SW_RST[%llx]\n",
5444                         np->port,
5445                         (unsigned long long) nr64_mac(BRXMAC_SW_RST));
5446                 return -ENODEV;
5447         }
5448
5449         return 0;
5450 }
5451
5452 static int niu_reset_rx_mac(struct niu *np)
5453 {
5454         if (np->flags & NIU_FLAGS_XMAC)
5455                 return niu_reset_rx_xmac(np);
5456         else
5457                 return niu_reset_rx_bmac(np);
5458 }
5459
5460 static void niu_init_rx_xmac(struct niu *np)
5461 {
5462         struct niu_parent *parent = np->parent;
5463         struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
5464         int first_rdc_table = tp->first_table_num;
5465         unsigned long i;
5466         u64 val;
5467
5468         nw64_mac(XMAC_ADD_FILT0, 0);
5469         nw64_mac(XMAC_ADD_FILT1, 0);
5470         nw64_mac(XMAC_ADD_FILT2, 0);
5471         nw64_mac(XMAC_ADD_FILT12_MASK, 0);
5472         nw64_mac(XMAC_ADD_FILT00_MASK, 0);
5473         for (i = 0; i < MAC_NUM_HASH; i++)
5474                 nw64_mac(XMAC_HASH_TBL(i), 0);
5475         nw64_mac(XRXMAC_STAT_MSK, ~(u64)0);
5476         niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
5477         niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
5478
5479         val = nr64_mac(XMAC_CONFIG);
5480         val &= ~(XMAC_CONFIG_RX_MAC_ENABLE |
5481                  XMAC_CONFIG_PROMISCUOUS |
5482                  XMAC_CONFIG_PROMISC_GROUP |
5483                  XMAC_CONFIG_ERR_CHK_DIS |
5484                  XMAC_CONFIG_RX_CRC_CHK_DIS |
5485                  XMAC_CONFIG_RESERVED_MULTICAST |
5486                  XMAC_CONFIG_RX_CODEV_CHK_DIS |
5487                  XMAC_CONFIG_ADDR_FILTER_EN |
5488                  XMAC_CONFIG_RCV_PAUSE_ENABLE |
5489                  XMAC_CONFIG_STRIP_CRC |
5490                  XMAC_CONFIG_PASS_FLOW_CTRL |
5491                  XMAC_CONFIG_MAC2IPP_PKT_CNT_EN);
5492         val |= (XMAC_CONFIG_HASH_FILTER_EN);
5493         nw64_mac(XMAC_CONFIG, val);
5494
5495         nw64_mac(RXMAC_BT_CNT, 0);
5496         nw64_mac(RXMAC_BC_FRM_CNT, 0);
5497         nw64_mac(RXMAC_MC_FRM_CNT, 0);
5498         nw64_mac(RXMAC_FRAG_CNT, 0);
5499         nw64_mac(RXMAC_HIST_CNT1, 0);
5500         nw64_mac(RXMAC_HIST_CNT2, 0);
5501         nw64_mac(RXMAC_HIST_CNT3, 0);
5502         nw64_mac(RXMAC_HIST_CNT4, 0);
5503         nw64_mac(RXMAC_HIST_CNT5, 0);
5504         nw64_mac(RXMAC_HIST_CNT6, 0);
5505         nw64_mac(RXMAC_HIST_CNT7, 0);
5506         nw64_mac(RXMAC_MPSZER_CNT, 0);
5507         nw64_mac(RXMAC_CRC_ER_CNT, 0);
5508         nw64_mac(RXMAC_CD_VIO_CNT, 0);
5509         nw64_mac(LINK_FAULT_CNT, 0);
5510 }
5511
5512 static void niu_init_rx_bmac(struct niu *np)
5513 {
5514         struct niu_parent *parent = np->parent;
5515         struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
5516         int first_rdc_table = tp->first_table_num;
5517         unsigned long i;
5518         u64 val;
5519
5520         nw64_mac(BMAC_ADD_FILT0, 0);
5521         nw64_mac(BMAC_ADD_FILT1, 0);
5522         nw64_mac(BMAC_ADD_FILT2, 0);
5523         nw64_mac(BMAC_ADD_FILT12_MASK, 0);
5524         nw64_mac(BMAC_ADD_FILT00_MASK, 0);
5525         for (i = 0; i < MAC_NUM_HASH; i++)
5526                 nw64_mac(BMAC_HASH_TBL(i), 0);
5527         niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
5528         niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
5529         nw64_mac(BRXMAC_STATUS_MASK, ~(u64)0);
5530
5531         val = nr64_mac(BRXMAC_CONFIG);
5532         val &= ~(BRXMAC_CONFIG_ENABLE |
5533                  BRXMAC_CONFIG_STRIP_PAD |
5534                  BRXMAC_CONFIG_STRIP_FCS |
5535                  BRXMAC_CONFIG_PROMISC |
5536                  BRXMAC_CONFIG_PROMISC_GRP |
5537                  BRXMAC_CONFIG_ADDR_FILT_EN |
5538                  BRXMAC_CONFIG_DISCARD_DIS);
5539         val |= (BRXMAC_CONFIG_HASH_FILT_EN);
5540         nw64_mac(BRXMAC_CONFIG, val);
5541
5542         val = nr64_mac(BMAC_ADDR_CMPEN);
5543         val |= BMAC_ADDR_CMPEN_EN0;
5544         nw64_mac(BMAC_ADDR_CMPEN, val);
5545 }
5546
5547 static void niu_init_rx_mac(struct niu *np)
5548 {
5549         niu_set_primary_mac(np, np->dev->dev_addr);
5550
5551         if (np->flags & NIU_FLAGS_XMAC)
5552                 niu_init_rx_xmac(np);
5553         else
5554                 niu_init_rx_bmac(np);
5555 }
5556
5557 static void niu_enable_tx_xmac(struct niu *np, int on)
5558 {
5559         u64 val = nr64_mac(XMAC_CONFIG);
5560
5561         if (on)
5562                 val |= XMAC_CONFIG_TX_ENABLE;
5563         else
5564                 val &= ~XMAC_CONFIG_TX_ENABLE;
5565         nw64_mac(XMAC_CONFIG, val);
5566 }
5567
5568 static void niu_enable_tx_bmac(struct niu *np, int on)
5569 {
5570         u64 val = nr64_mac(BTXMAC_CONFIG);
5571
5572         if (on)
5573                 val |= BTXMAC_CONFIG_ENABLE;
5574         else
5575                 val &= ~BTXMAC_CONFIG_ENABLE;
5576         nw64_mac(BTXMAC_CONFIG, val);
5577 }
5578
5579 static void niu_enable_tx_mac(struct niu *np, int on)
5580 {
5581         if (np->flags & NIU_FLAGS_XMAC)
5582                 niu_enable_tx_xmac(np, on);
5583         else
5584                 niu_enable_tx_bmac(np, on);
5585 }
5586
5587 static void niu_enable_rx_xmac(struct niu *np, int on)
5588 {
5589         u64 val = nr64_mac(XMAC_CONFIG);
5590
5591         val &= ~(XMAC_CONFIG_HASH_FILTER_EN |
5592                  XMAC_CONFIG_PROMISCUOUS);
5593
5594         if (np->flags & NIU_FLAGS_MCAST)
5595                 val |= XMAC_CONFIG_HASH_FILTER_EN;
5596         if (np->flags & NIU_FLAGS_PROMISC)
5597                 val |= XMAC_CONFIG_PROMISCUOUS;
5598
5599         if (on)
5600                 val |= XMAC_CONFIG_RX_MAC_ENABLE;
5601         else
5602                 val &= ~XMAC_CONFIG_RX_MAC_ENABLE;
5603         nw64_mac(XMAC_CONFIG, val);
5604 }
5605
5606 static void niu_enable_rx_bmac(struct niu *np, int on)
5607 {
5608         u64 val = nr64_mac(BRXMAC_CONFIG);
5609
5610         val &= ~(BRXMAC_CONFIG_HASH_FILT_EN |
5611                  BRXMAC_CONFIG_PROMISC);
5612
5613         if (np->flags & NIU_FLAGS_MCAST)
5614                 val |= BRXMAC_CONFIG_HASH_FILT_EN;
5615         if (np->flags & NIU_FLAGS_PROMISC)
5616                 val |= BRXMAC_CONFIG_PROMISC;
5617
5618         if (on)
5619                 val |= BRXMAC_CONFIG_ENABLE;
5620         else
5621                 val &= ~BRXMAC_CONFIG_ENABLE;
5622         nw64_mac(BRXMAC_CONFIG, val);
5623 }
5624
5625 static void niu_enable_rx_mac(struct niu *np, int on)
5626 {
5627         if (np->flags & NIU_FLAGS_XMAC)
5628                 niu_enable_rx_xmac(np, on);
5629         else
5630                 niu_enable_rx_bmac(np, on);
5631 }
5632
5633 static int niu_init_mac(struct niu *np)
5634 {
5635         int err;
5636
5637         niu_init_xif(np);
5638         err = niu_init_pcs(np);
5639         if (err)
5640                 return err;
5641
5642         err = niu_reset_tx_mac(np);
5643         if (err)
5644                 return err;
5645         niu_init_tx_mac(np);
5646         err = niu_reset_rx_mac(np);
5647         if (err)
5648                 return err;
5649         niu_init_rx_mac(np);
5650
5651         /* This looks hokey but the RX MAC reset we just did will
5652          * undo some of the state we set up in niu_init_tx_mac() so we
5653          * have to call it again.  In particular, the RX MAC reset will
5654          * set the XMAC_MAX register back to its default value.
5655          */
5656         niu_init_tx_mac(np);
5657         niu_enable_tx_mac(np, 1);
5658
5659         niu_enable_rx_mac(np, 1);
5660
5661         return 0;
5662 }
5663
5664 static void niu_stop_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
5665 {
5666         (void) niu_tx_channel_stop(np, rp->tx_channel);
5667 }
5668
5669 static void niu_stop_tx_channels(struct niu *np)
5670 {
5671         int i;
5672
5673         for (i = 0; i < np->num_tx_rings; i++) {
5674                 struct tx_ring_info *rp = &np->tx_rings[i];
5675
5676                 niu_stop_one_tx_channel(np, rp);
5677         }
5678 }
5679
5680 static void niu_reset_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
5681 {
5682         (void) niu_tx_channel_reset(np, rp->tx_channel);
5683 }
5684
5685 static void niu_reset_tx_channels(struct niu *np)
5686 {
5687         int i;
5688
5689         for (i = 0; i < np->num_tx_rings; i++) {
5690                 struct tx_ring_info *rp = &np->tx_rings[i];
5691
5692                 niu_reset_one_tx_channel(np, rp);
5693         }
5694 }
5695
5696 static void niu_stop_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
5697 {
5698         (void) niu_enable_rx_channel(np, rp->rx_channel, 0);
5699 }
5700
5701 static void niu_stop_rx_channels(struct niu *np)
5702 {
5703         int i;
5704
5705         for (i = 0; i < np->num_rx_rings; i++) {
5706                 struct rx_ring_info *rp = &np->rx_rings[i];
5707
5708                 niu_stop_one_rx_channel(np, rp);
5709         }
5710 }
5711
5712 static void niu_reset_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
5713 {
5714         int channel = rp->rx_channel;
5715
5716         (void) niu_rx_channel_reset(np, channel);
5717         nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_ALL);
5718         nw64(RX_DMA_CTL_STAT(channel), 0);
5719         (void) niu_enable_rx_channel(np, channel, 0);
5720 }
5721
5722 static void niu_reset_rx_channels(struct niu *np)
5723 {
5724         int i;
5725
5726         for (i = 0; i < np->num_rx_rings; i++) {
5727                 struct rx_ring_info *rp = &np->rx_rings[i];
5728
5729                 niu_reset_one_rx_channel(np, rp);
5730         }
5731 }
5732
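/* Wait briefly for the IPP DFIFO read and write pointers to meet, warn
 * if they never do, then clear the IPP enable bits and soft-reset the
 * block.
 */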
5733 static void niu_disable_ipp(struct niu *np)
5734 {
5735         u64 rd, wr, val;
5736         int limit;
5737
5738         rd = nr64_ipp(IPP_DFIFO_RD_PTR);
5739         wr = nr64_ipp(IPP_DFIFO_WR_PTR);
5740         limit = 100;
5741         while (--limit >= 0 && (rd != wr)) {
5742                 rd = nr64_ipp(IPP_DFIFO_RD_PTR);
5743                 wr = nr64_ipp(IPP_DFIFO_WR_PTR);
5744         }
5745         if (limit < 0 &&
5746             (rd != 0 && wr != 1)) {
5747                 dev_err(np->device, PFX "%s: IPP would not quiesce, "
5748                         "rd_ptr[%llx] wr_ptr[%llx]\n",
5749                         np->dev->name,
5750                         (unsigned long long) nr64_ipp(IPP_DFIFO_RD_PTR),
5751                         (unsigned long long) nr64_ipp(IPP_DFIFO_WR_PTR));
5752         }
5753
5754         val = nr64_ipp(IPP_CFIG);
5755         val &= ~(IPP_CFIG_IPP_ENABLE |
5756                  IPP_CFIG_DFIFO_ECC_EN |
5757                  IPP_CFIG_DROP_BAD_CRC |
5758                  IPP_CFIG_CKSUM_EN);
5759         nw64_ipp(IPP_CFIG, val);
5760
5761         (void) niu_ipp_reset(np);
5762 }
5763
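/* Bring the whole datapath up in order: TXC, TX channels, RX channels,
 * classifier, ZCP, IPP and finally the MAC.  Any failure unwinds the
 * stages that were already initialized.
 */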
5764 static int niu_init_hw(struct niu *np)
5765 {
5766         int i, err;
5767
5768         niudbg(IFUP, "%s: Initialize TXC\n", np->dev->name);
5769         niu_txc_enable_port(np, 1);
5770         niu_txc_port_dma_enable(np, 1);
5771         niu_txc_set_imask(np, 0);
5772
5773         niudbg(IFUP, "%s: Initialize TX channels\n", np->dev->name);
5774         for (i = 0; i < np->num_tx_rings; i++) {
5775                 struct tx_ring_info *rp = &np->tx_rings[i];
5776
5777                 err = niu_init_one_tx_channel(np, rp);
5778                 if (err)
5779                         return err;
5780         }
5781
5782         niudbg(IFUP, "%s: Initialize RX channels\n", np->dev->name);
5783         err = niu_init_rx_channels(np);
5784         if (err)
5785                 goto out_uninit_tx_channels;
5786
5787         niudbg(IFUP, "%s: Initialize classifier\n", np->dev->name);
5788         err = niu_init_classifier_hw(np);
5789         if (err)
5790                 goto out_uninit_rx_channels;
5791
5792         niudbg(IFUP, "%s: Initialize ZCP\n", np->dev->name);
5793         err = niu_init_zcp(np);
5794         if (err)
5795                 goto out_uninit_rx_channels;
5796
5797         niudbg(IFUP, "%s: Initialize IPP\n", np->dev->name);
5798         err = niu_init_ipp(np);
5799         if (err)
5800                 goto out_uninit_rx_channels;
5801
5802         niudbg(IFUP, "%s: Initialize MAC\n", np->dev->name);
5803         err = niu_init_mac(np);
5804         if (err)
5805                 goto out_uninit_ipp;
5806
5807         return 0;
5808
5809 out_uninit_ipp:
5810         niudbg(IFUP, "%s: Uninit IPP\n", np->dev->name);
5811         niu_disable_ipp(np);
5812
5813 out_uninit_rx_channels:
5814         niudbg(IFUP, "%s: Uninit RX channels\n", np->dev->name);
5815         niu_stop_rx_channels(np);
5816         niu_reset_rx_channels(np);
5817
5818 out_uninit_tx_channels:
5819         niudbg(IFUP, "%s: Uninit TX channels\n", np->dev->name);
5820         niu_stop_tx_channels(np);
5821         niu_reset_tx_channels(np);
5822
5823         return err;
5824 }
5825
5826 static void niu_stop_hw(struct niu *np)
5827 {
5828         niudbg(IFDOWN, "%s: Disable interrupts\n", np->dev->name);
5829         niu_enable_interrupts(np, 0);
5830
5831         niudbg(IFDOWN, "%s: Disable RX MAC\n", np->dev->name);
5832         niu_enable_rx_mac(np, 0);
5833
5834         niudbg(IFDOWN, "%s: Disable IPP\n", np->dev->name);
5835         niu_disable_ipp(np);
5836
5837         niudbg(IFDOWN, "%s: Stop TX channels\n", np->dev->name);
5838         niu_stop_tx_channels(np);
5839
5840         niudbg(IFDOWN, "%s: Stop RX channels\n", np->dev->name);
5841         niu_stop_rx_channels(np);
5842
5843         niudbg(IFDOWN, "%s: Reset TX channels\n", np->dev->name);
5844         niu_reset_tx_channels(np);
5845
5846         niudbg(IFDOWN, "%s: Reset RX channels\n", np->dev->name);
5847         niu_reset_rx_channels(np);
5848 }
5849
5850 static int niu_request_irq(struct niu *np)
5851 {
5852         int i, j, err;
5853
5854         err = 0;
5855         for (i = 0; i < np->num_ldg; i++) {
5856                 struct niu_ldg *lp = &np->ldg[i];
5857
5858                 err = request_irq(lp->irq, niu_interrupt,
5859                                   IRQF_SHARED | IRQF_SAMPLE_RANDOM,
5860                                   np->dev->name, lp);
5861                 if (err)
5862                         goto out_free_irqs;
5863
5864         }
5865
5866         return 0;
5867
5868 out_free_irqs:
5869         for (j = 0; j < i; j++) {
5870                 struct niu_ldg *lp = &np->ldg[j];
5871
5872                 free_irq(lp->irq, lp);
5873         }
5874         return err;
5875 }
5876
5877 static void niu_free_irq(struct niu *np)
5878 {
5879         int i;
5880
5881         for (i = 0; i < np->num_ldg; i++) {
5882                 struct niu_ldg *lp = &np->ldg[i];
5883
5884                 free_irq(lp->irq, lp);
5885         }
5886 }
5887
5888 static void niu_enable_napi(struct niu *np)
5889 {
5890         int i;
5891
5892         for (i = 0; i < np->num_ldg; i++)
5893                 napi_enable(&np->ldg[i].napi);
5894 }
5895
5896 static void niu_disable_napi(struct niu *np)
5897 {
5898         int i;
5899
5900         for (i = 0; i < np->num_ldg; i++)
5901                 napi_disable(&np->ldg[i].napi);
5902 }
5903
5904 static int niu_open(struct net_device *dev)
5905 {
5906         struct niu *np = netdev_priv(dev);
5907         int err;
5908
5909         netif_carrier_off(dev);
5910
5911         err = niu_alloc_channels(np);
5912         if (err)
5913                 goto out_err;
5914
5915         err = niu_enable_interrupts(np, 0);
5916         if (err)
5917                 goto out_free_channels;
5918
5919         err = niu_request_irq(np);
5920         if (err)
5921                 goto out_free_channels;
5922
5923         niu_enable_napi(np);
5924
5925         spin_lock_irq(&np->lock);
5926
5927         err = niu_init_hw(np);
5928         if (!err) {
5929                 init_timer(&np->timer);
5930                 np->timer.expires = jiffies + HZ;
5931                 np->timer.data = (unsigned long) np;
5932                 np->timer.function = niu_timer;
5933
5934                 err = niu_enable_interrupts(np, 1);
5935                 if (err)
5936                         niu_stop_hw(np);
5937         }
5938
5939         spin_unlock_irq(&np->lock);
5940
5941         if (err) {
5942                 niu_disable_napi(np);
5943                 goto out_free_irq;
5944         }
5945
5946         netif_tx_start_all_queues(dev);
5947
5948         if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
5949                 netif_carrier_on(dev);
5950
5951         add_timer(&np->timer);
5952
5953         return 0;
5954
5955 out_free_irq:
5956         niu_free_irq(np);
5957
5958 out_free_channels:
5959         niu_free_channels(np);
5960
5961 out_err:
5962         return err;
5963 }
5964
5965 static void niu_full_shutdown(struct niu *np, struct net_device *dev)
5966 {
5967         cancel_work_sync(&np->reset_task);
5968
5969         niu_disable_napi(np);
5970         netif_tx_stop_all_queues(dev);
5971
5972         del_timer_sync(&np->timer);
5973
5974         spin_lock_irq(&np->lock);
5975
5976         niu_stop_hw(np);
5977
5978         spin_unlock_irq(&np->lock);
5979 }
5980
5981 static int niu_close(struct net_device *dev)
5982 {
5983         struct niu *np = netdev_priv(dev);
5984
5985         niu_full_shutdown(np, dev);
5986
5987         niu_free_irq(np);
5988
5989         niu_free_channels(np);
5990
5991         niu_handle_led(np, 0);
5992
5993         return 0;
5994 }
5995
5996 static void niu_sync_xmac_stats(struct niu *np)
5997 {
5998         struct niu_xmac_stats *mp = &np->mac_stats.xmac;
5999
6000         mp->tx_frames += nr64_mac(TXMAC_FRM_CNT);
6001         mp->tx_bytes += nr64_mac(TXMAC_BYTE_CNT);
6002
6003         mp->rx_link_faults += nr64_mac(LINK_FAULT_CNT);
6004         mp->rx_align_errors += nr64_mac(RXMAC_ALIGN_ERR_CNT);
6005         mp->rx_frags += nr64_mac(RXMAC_FRAG_CNT);
6006         mp->rx_mcasts += nr64_mac(RXMAC_MC_FRM_CNT);
6007         mp->rx_bcasts += nr64_mac(RXMAC_BC_FRM_CNT);
6008         mp->rx_hist_cnt1 += nr64_mac(RXMAC_HIST_CNT1);
6009         mp->rx_hist_cnt2 += nr64_mac(RXMAC_HIST_CNT2);
6010         mp->rx_hist_cnt3 += nr64_mac(RXMAC_HIST_CNT3);
6011         mp->rx_hist_cnt4 += nr64_mac(RXMAC_HIST_CNT4);
6012         mp->rx_hist_cnt5 += nr64_mac(RXMAC_HIST_CNT5);
6013         mp->rx_hist_cnt6 += nr64_mac(RXMAC_HIST_CNT6);
6014         mp->rx_hist_cnt7 += nr64_mac(RXMAC_HIST_CNT7);
6015         mp->rx_octets += nr64_mac(RXMAC_BT_CNT);
6016         mp->rx_code_violations += nr64_mac(RXMAC_CD_VIO_CNT);
6017         mp->rx_len_errors += nr64_mac(RXMAC_MPSZER_CNT);
6018         mp->rx_crc_errors += nr64_mac(RXMAC_CRC_ER_CNT);
6019 }
6020
6021 static void niu_sync_bmac_stats(struct niu *np)
6022 {
6023         struct niu_bmac_stats *mp = &np->mac_stats.bmac;
6024
6025         mp->tx_bytes += nr64_mac(BTXMAC_BYTE_CNT);
6026         mp->tx_frames += nr64_mac(BTXMAC_FRM_CNT);
6027
6028         mp->rx_frames += nr64_mac(BRXMAC_FRAME_CNT);
6029         mp->rx_align_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
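        /* XXX rx_crc_errors re-reads the alignment error counter just
         * above; a dedicated CRC error counter register is presumably
         * what was intended here.
         */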
6030         mp->rx_crc_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
6031         mp->rx_len_errors += nr64_mac(BRXMAC_CODE_VIOL_ERR_CNT);
6032 }
6033
6034 static void niu_sync_mac_stats(struct niu *np)
6035 {
6036         if (np->flags & NIU_FLAGS_XMAC)
6037                 niu_sync_xmac_stats(np);
6038         else
6039                 niu_sync_bmac_stats(np);
6040 }
6041
6042 static void niu_get_rx_stats(struct niu *np)
6043 {
6044         unsigned long pkts, dropped, errors, bytes;
6045         int i;
6046
6047         pkts = dropped = errors = bytes = 0;
6048         for (i = 0; i < np->num_rx_rings; i++) {
6049                 struct rx_ring_info *rp = &np->rx_rings[i];
6050
6051                 pkts += rp->rx_packets;
6052                 bytes += rp->rx_bytes;
6053                 dropped += rp->rx_dropped;
6054                 errors += rp->rx_errors;
6055         }
6056         np->net_stats.rx_packets = pkts;
6057         np->net_stats.rx_bytes = bytes;
6058         np->net_stats.rx_dropped = dropped;
6059         np->net_stats.rx_errors = errors;
6060 }
6061
6062 static void niu_get_tx_stats(struct niu *np)
6063 {
6064         unsigned long pkts, errors, bytes;
6065         int i;
6066
6067         pkts = errors = bytes = 0;
6068         for (i = 0; i < np->num_tx_rings; i++) {
6069                 struct tx_ring_info *rp = &np->tx_rings[i];
6070
6071                 pkts += rp->tx_packets;
6072                 bytes += rp->tx_bytes;
6073                 errors += rp->tx_errors;
6074         }
6075         np->net_stats.tx_packets = pkts;
6076         np->net_stats.tx_bytes = bytes;
6077         np->net_stats.tx_errors = errors;
6078 }
6079
6080 static struct net_device_stats *niu_get_stats(struct net_device *dev)
6081 {
6082         struct niu *np = netdev_priv(dev);
6083
6084         niu_get_rx_stats(np);
6085         niu_get_tx_stats(np);
6086
6087         return &np->net_stats;
6088 }
6089
6090 static void niu_load_hash_xmac(struct niu *np, u16 *hash)
6091 {
6092         int i;
6093
6094         for (i = 0; i < 16; i++)
6095                 nw64_mac(XMAC_HASH_TBL(i), hash[i]);
6096 }
6097
6098 static void niu_load_hash_bmac(struct niu *np, u16 *hash)
6099 {
6100         int i;
6101
6102         for (i = 0; i < 16; i++)
6103                 nw64_mac(BMAC_HASH_TBL(i), hash[i]);
6104 }
6105
6106 static void niu_load_hash(struct niu *np, u16 *hash)
6107 {
6108         if (np->flags & NIU_FLAGS_XMAC)
6109                 niu_load_hash_xmac(np, hash);
6110         else
6111                 niu_load_hash_bmac(np, hash);
6112 }
6113
6114 static void niu_set_rx_mode(struct net_device *dev)
6115 {
6116         struct niu *np = netdev_priv(dev);
6117         int i, alt_cnt, err;
6118         struct dev_addr_list *addr;
6119         unsigned long flags;
6120         u16 hash[16] = { 0, };
6121
6122         spin_lock_irqsave(&np->lock, flags);
6123         niu_enable_rx_mac(np, 0);
6124
6125         np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC);
6126         if (dev->flags & IFF_PROMISC)
6127                 np->flags |= NIU_FLAGS_PROMISC;
6128         if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 0))
6129                 np->flags |= NIU_FLAGS_MCAST;
6130
6131         alt_cnt = dev->uc_count;
6132         if (alt_cnt > niu_num_alt_addr(np)) {
6133                 alt_cnt = 0;
6134                 np->flags |= NIU_FLAGS_PROMISC;
6135         }
6136
6137         if (alt_cnt) {
6138                 int index = 0;
6139
6140                 for (addr = dev->uc_list; addr; addr = addr->next) {
6141                         err = niu_set_alt_mac(np, index,
6142                                               addr->da_addr);
6143                         if (err)
6144                                 printk(KERN_WARNING PFX "%s: Error %d "
6145                                        "adding alt mac %d\n",
6146                                        dev->name, err, index);
6147                         err = niu_enable_alt_mac(np, index, 1);
6148                         if (err)
6149                                 printk(KERN_WARNING PFX "%s: Error %d "
6150                                        "enabling alt mac %d\n",
6151                                        dev->name, err, index);
6152
6153                         index++;
6154                 }
6155         } else {
6156                 int alt_start;
6157                 if (np->flags & NIU_FLAGS_XMAC)
6158                         alt_start = 0;
6159                 else
6160                         alt_start = 1;
6161                 for (i = alt_start; i < niu_num_alt_addr(np); i++) {
6162                         err = niu_enable_alt_mac(np, i, 0);
6163                         if (err)
6164                                 printk(KERN_WARNING PFX "%s: Error %d "
6165                                        "disabling alt mac %d\n",
6166                                        dev->name, err, i);
6167                 }
6168         }
6169         if (dev->flags & IFF_ALLMULTI) {
6170                 for (i = 0; i < 16; i++)
6171                         hash[i] = 0xffff;
6172         } else if (dev->mc_count > 0) {
6173                 for (addr = dev->mc_list; addr; addr = addr->next) {
6174                         u32 crc = ether_crc_le(ETH_ALEN, addr->da_addr);
6175
6176                         crc >>= 24;
6177                         hash[crc >> 4] |= (1 << (15 - (crc & 0xf)));
6178                 }
6179         }
6180
6181         if (np->flags & NIU_FLAGS_MCAST)
6182                 niu_load_hash(np, hash);
6183
6184         niu_enable_rx_mac(np, 1);
6185         spin_unlock_irqrestore(&np->lock, flags);
6186 }
6187
6188 static int niu_set_mac_addr(struct net_device *dev, void *p)
6189 {
6190         struct niu *np = netdev_priv(dev);
6191         struct sockaddr *addr = p;
6192         unsigned long flags;
6193
6194         if (!is_valid_ether_addr(addr->sa_data))
6195                 return -EINVAL;
6196
6197         memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
6198
6199         if (!netif_running(dev))
6200                 return 0;
6201
6202         spin_lock_irqsave(&np->lock, flags);
6203         niu_enable_rx_mac(np, 0);
6204         niu_set_primary_mac(np, dev->dev_addr);
6205         niu_enable_rx_mac(np, 1);
6206         spin_unlock_irqrestore(&np->lock, flags);
6207
6208         return 0;
6209 }
6210
6211 static int niu_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6212 {
6213         return -EOPNOTSUPP;
6214 }
6215
6216 static void niu_netif_stop(struct niu *np)
6217 {
6218         np->dev->trans_start = jiffies; /* prevent tx timeout */
6219
6220         niu_disable_napi(np);
6221
6222         netif_tx_disable(np->dev);
6223 }
6224
6225 static void niu_netif_start(struct niu *np)
6226 {
6227         /* NOTE: unconditionally waking all TX queues is only appropriate
6228          * so long as all callers are assured to have free tx slots
6229          * (such as after niu_init_hw).
6230          */
6231         netif_tx_wake_all_queues(np->dev);
6232
6233         niu_enable_napi(np);
6234
6235         niu_enable_interrupts(np, 1);
6236 }
6237
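/* Rebuild the rings after a reset: re-post RBR entries for the pages
 * still held on each RX ring's hash chains, top the rings up with fresh
 * pages, release any TX packets still in flight and reset all ring
 * indices.
 */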
6238 static void niu_reset_buffers(struct niu *np)
6239 {
6240         int i, j, k, err;
6241
6242         if (np->rx_rings) {
6243                 for (i = 0; i < np->num_rx_rings; i++) {
6244                         struct rx_ring_info *rp = &np->rx_rings[i];
6245
6246                         for (j = 0, k = 0; j < MAX_RBR_RING_SIZE; j++) {
6247                                 struct page *page;
6248
6249                                 page = rp->rxhash[j];
6250                                 while (page) {
6251                                         struct page *next =
6252                                                 (struct page *) page->mapping;
6253                                         u64 base = page->index;
6254                                         base = base >> RBR_DESCR_ADDR_SHIFT;
6255                                         rp->rbr[k++] = cpu_to_le32(base);
6256                                         page = next;
6257                                 }
6258                         }
6259                         for (; k < MAX_RBR_RING_SIZE; k++) {
6260                                 err = niu_rbr_add_page(np, rp, GFP_ATOMIC, k);
6261                                 if (unlikely(err))
6262                                         break;
6263                         }
6264
6265                         rp->rbr_index = rp->rbr_table_size - 1;
6266                         rp->rcr_index = 0;
6267                         rp->rbr_pending = 0;
6268                         rp->rbr_refill_pending = 0;
6269                 }
6270         }
6271         if (np->tx_rings) {
6272                 for (i = 0; i < np->num_tx_rings; i++) {
6273                         struct tx_ring_info *rp = &np->tx_rings[i];
6274
6275                         for (j = 0; j < MAX_TX_RING_SIZE; j++) {
6276                                 if (rp->tx_buffs[j].skb)
6277                                         (void) release_tx_packet(np, rp, j);
6278                         }
6279
6280                         rp->pending = MAX_TX_RING_SIZE;
6281                         rp->prod = 0;
6282                         rp->cons = 0;
6283                         rp->wrap_bit = 0;
6284                 }
6285         }
6286 }
6287
6288 static void niu_reset_task(struct work_struct *work)
6289 {
6290         struct niu *np = container_of(work, struct niu, reset_task);
6291         unsigned long flags;
6292         int err;
6293
6294         spin_lock_irqsave(&np->lock, flags);
6295         if (!netif_running(np->dev)) {
6296                 spin_unlock_irqrestore(&np->lock, flags);
6297                 return;
6298         }
6299
6300         spin_unlock_irqrestore(&np->lock, flags);
6301
6302         del_timer_sync(&np->timer);
6303
6304         niu_netif_stop(np);
6305
6306         spin_lock_irqsave(&np->lock, flags);
6307
6308         niu_stop_hw(np);
6309
6310         spin_unlock_irqrestore(&np->lock, flags);
6311
6312         niu_reset_buffers(np);
6313
6314         spin_lock_irqsave(&np->lock, flags);
6315
6316         err = niu_init_hw(np);
6317         if (!err) {
6318                 np->timer.expires = jiffies + HZ;
6319                 add_timer(&np->timer);
6320                 niu_netif_start(np);
6321         }
6322
6323         spin_unlock_irqrestore(&np->lock, flags);
6324 }
6325
6326 static void niu_tx_timeout(struct net_device *dev)
6327 {
6328         struct niu *np = netdev_priv(dev);
6329
6330         dev_err(np->device, PFX "%s: Transmit timed out, resetting\n",
6331                 dev->name);
6332
6333         schedule_work(&np->reset_task);
6334 }
6335
6336 static void niu_set_txd(struct tx_ring_info *rp, int index,
6337                         u64 mapping, u64 len, u64 mark,
6338                         u64 n_frags)
6339 {
6340         __le64 *desc = &rp->descr[index];
6341
6342         *desc = cpu_to_le64(mark |
6343                             (n_frags << TX_DESC_NUM_PTR_SHIFT) |
6344                             (len << TX_DESC_TR_LEN_SHIFT) |
6345                             (mapping & TX_DESC_SAD));
6346 }
6347
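/* Build the 64-bit TX packet header control word: the L3/L4 start and
 * stuff offsets are divided by two before being placed in their TXHDR
 * fields, IHL comes from the IP header (in 32-bit words), and the
 * LLC/VLAN/IP-version and checksum-type bits are derived from the frame.
 */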
6348 static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr,
6349                                 u64 pad_bytes, u64 len)
6350 {
6351         u16 eth_proto, eth_proto_inner;
6352         u64 csum_bits, l3off, ihl, ret;
6353         u8 ip_proto;
6354         int ipv6;
6355
6356         eth_proto = be16_to_cpu(ehdr->h_proto);
6357         eth_proto_inner = eth_proto;
6358         if (eth_proto == ETH_P_8021Q) {
6359                 struct vlan_ethhdr *vp = (struct vlan_ethhdr *) ehdr;
6360                 __be16 val = vp->h_vlan_encapsulated_proto;
6361
6362                 eth_proto_inner = be16_to_cpu(val);
6363         }
6364
6365         ipv6 = ihl = 0;
6366         switch (skb->protocol) {
6367         case __constant_htons(ETH_P_IP):
6368                 ip_proto = ip_hdr(skb)->protocol;
6369                 ihl = ip_hdr(skb)->ihl;
6370                 break;
6371         case __constant_htons(ETH_P_IPV6):
6372                 ip_proto = ipv6_hdr(skb)->nexthdr;
6373                 ihl = (40 >> 2);
6374                 ipv6 = 1;
6375                 break;
6376         default:
6377                 ip_proto = ihl = 0;
6378                 break;
6379         }
6380
6381         csum_bits = TXHDR_CSUM_NONE;
6382         if (skb->ip_summed == CHECKSUM_PARTIAL) {
6383                 u64 start, stuff;
6384
6385                 csum_bits = (ip_proto == IPPROTO_TCP ?
6386                              TXHDR_CSUM_TCP :
6387                              (ip_proto == IPPROTO_UDP ?
6388                               TXHDR_CSUM_UDP : TXHDR_CSUM_SCTP));
6389
6390                 start = skb_transport_offset(skb) -
6391                         (pad_bytes + sizeof(struct tx_pkt_hdr));
6392                 stuff = start + skb->csum_offset;
6393
6394                 csum_bits |= (start / 2) << TXHDR_L4START_SHIFT;
6395                 csum_bits |= (stuff / 2) << TXHDR_L4STUFF_SHIFT;
6396         }
6397
6398         l3off = skb_network_offset(skb) -
6399                 (pad_bytes + sizeof(struct tx_pkt_hdr));
6400
6401         ret = (((pad_bytes / 2) << TXHDR_PAD_SHIFT) |
6402                (len << TXHDR_LEN_SHIFT) |
6403                ((l3off / 2) << TXHDR_L3START_SHIFT) |
6404                (ihl << TXHDR_IHL_SHIFT) |
6405                ((eth_proto_inner < 1536) ? TXHDR_LLC : 0) |
6406                ((eth_proto == ETH_P_8021Q) ? TXHDR_VLAN : 0) |
6407                (ipv6 ? TXHDR_IP_VER : 0) |
6408                csum_bits);
6409
6410         return ret;
6411 }
6412
6413 static int niu_start_xmit(struct sk_buff *skb, struct net_device *dev)
6414 {
6415         struct niu *np = netdev_priv(dev);
6416         unsigned long align, headroom;
6417         struct netdev_queue *txq;
6418         struct tx_ring_info *rp;
6419         struct tx_pkt_hdr *tp;
6420         unsigned int len, nf