/* Source: linux-2.6.git drivers/net/niu.c
 * (unrelated gitweb header line "mfd: Correct WM8350 I2C return code usage"
 * was extraction residue and has been folded into this comment.)
 */
1 /* niu.c: Neptune ethernet driver.
2  *
3  * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
4  */
5
6 #include <linux/module.h>
7 #include <linux/init.h>
8 #include <linux/pci.h>
9 #include <linux/dma-mapping.h>
10 #include <linux/netdevice.h>
11 #include <linux/ethtool.h>
12 #include <linux/etherdevice.h>
13 #include <linux/platform_device.h>
14 #include <linux/delay.h>
15 #include <linux/bitops.h>
16 #include <linux/mii.h>
17 #include <linux/if_ether.h>
18 #include <linux/if_vlan.h>
19 #include <linux/ip.h>
20 #include <linux/in.h>
21 #include <linux/ipv6.h>
22 #include <linux/log2.h>
23 #include <linux/jiffies.h>
24 #include <linux/crc32.h>
25
26 #include <linux/io.h>
27
28 #ifdef CONFIG_SPARC64
29 #include <linux/of_device.h>
30 #endif
31
32 #include "niu.h"
33
/* Driver identity strings used for logging prefixes and modinfo. */
#define DRV_MODULE_NAME         "niu"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "0.9"
#define DRV_MODULE_RELDATE      "May 4, 2008"

/* Banner printed at probe time; discarded after init (__devinitdata). */
static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("NIU ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Fallback for kernels whose dma-mapping headers lack a 44-bit mask. */
#ifndef DMA_44BIT_MASK
#define DMA_44BIT_MASK  0x00000fffffffffffULL
#endif
50
/* 64-bit MMIO accessors for platforms that do not provide readq/writeq.
 * Each 64-bit access is split into two 32-bit halves (low word first),
 * so it is NOT atomic with respect to the hardware -- assumed acceptable
 * for the registers this driver touches (TODO confirm for this device).
 */
#ifndef readq
static u64 readq(void __iomem *reg)
{
        return ((u64) readl(reg)) | (((u64) readl(reg + 4UL)) << 32);
}

static void writeq(u64 val, void __iomem *reg)
{
        writel(val & 0xffffffff, reg);
        writel(val >> 32, reg + 0x4UL);
}
#endif
63
/* PCI IDs this driver binds to (Sun Neptune NIU). */
static struct pci_device_id niu_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_SUN, 0xabcd)},
        {}
};

MODULE_DEVICE_TABLE(pci, niu_pci_tbl);
70
#define NIU_TX_TIMEOUT                  (5 * HZ)

/* Register accessors.  Each macro expects a 'struct niu *np' named "np"
 * in scope.  The MAC block has its own base pointer; the IPP/PCS/XPCS
 * blocks are reached via per-port offsets from the main register base.
 */
#define nr64(reg)               readq(np->regs + (reg))
#define nw64(reg, val)          writeq((val), np->regs + (reg))

#define nr64_mac(reg)           readq(np->mac_regs + (reg))
#define nw64_mac(reg, val)      writeq((val), np->mac_regs + (reg))

#define nr64_ipp(reg)           readq(np->regs + np->ipp_off + (reg))
#define nw64_ipp(reg, val)      writeq((val), np->regs + np->ipp_off + (reg))

#define nr64_pcs(reg)           readq(np->regs + np->pcs_off + (reg))
#define nw64_pcs(reg, val)      writeq((val), np->regs + np->pcs_off + (reg))

#define nr64_xpcs(reg)          readq(np->regs + np->xpcs_off + (reg))
#define nw64_xpcs(reg, val)     writeq((val), np->regs + np->xpcs_off + (reg))

#define NIU_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

/* debug=-1 means "use NIU_MSG_DEFAULT" (presumably resolved at probe
 * time -- the consumer of niu_debug is outside this chunk).
 */
static int niu_debug;
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "NIU debug level");
94
/* Conditional logging: emit only when the matching NETIF_MSG_* bit is
 * set in np->msg_enable.  Each macro expects 'np' in scope.
 */
#define niudbg(TYPE, f, a...) \
do {    if ((np)->msg_enable & NETIF_MSG_##TYPE) \
                printk(KERN_DEBUG PFX f, ## a); \
} while (0)

#define niuinfo(TYPE, f, a...) \
do {    if ((np)->msg_enable & NETIF_MSG_##TYPE) \
                printk(KERN_INFO PFX f, ## a); \
} while (0)

#define niuwarn(TYPE, f, a...) \
do {    if ((np)->msg_enable & NETIF_MSG_##TYPE) \
                printk(KERN_WARNING PFX f, ## a); \
} while (0)

/* Serialize access to state shared by all ports of one chip. */
#define niu_lock_parent(np, flags) \
        spin_lock_irqsave(&np->parent->lock, flags)
#define niu_unlock_parent(np, flags) \
        spin_unlock_irqrestore(&np->parent->lock, flags)

static int serdes_init_10g_serdes(struct niu *np);
116
117 static int __niu_wait_bits_clear_mac(struct niu *np, unsigned long reg,
118                                      u64 bits, int limit, int delay)
119 {
120         while (--limit >= 0) {
121                 u64 val = nr64_mac(reg);
122
123                 if (!(val & bits))
124                         break;
125                 udelay(delay);
126         }
127         if (limit < 0)
128                 return -ENODEV;
129         return 0;
130 }
131
/* Write @bits into a MAC register (plain write, not read-modify-write)
 * and wait for the hardware to clear them; used for self-clearing
 * command/reset bits.  Logs @reg_name and the stuck value on timeout.
 * Returns 0 or -ENODEV.
 */
static int __niu_set_and_wait_clear_mac(struct niu *np, unsigned long reg,
                                        u64 bits, int limit, int delay,
                                        const char *reg_name)
{
        int err;

        nw64_mac(reg, bits);
        err = __niu_wait_bits_clear_mac(np, reg, bits, limit, delay);
        if (err)
                dev_err(np->device, PFX "%s: bits (%llx) of register %s "
                        "would not clear, val[%llx]\n",
                        np->dev->name, (unsigned long long) bits, reg_name,
                        (unsigned long long) nr64_mac(reg));
        return err;
}
147
/* Wrapper enforcing compile-time sanity on LIMIT/DELAY (must be
 * constant expressions for BUILD_BUG_ON to work).
 */
#define niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({      BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
        __niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})
152
153 static int __niu_wait_bits_clear_ipp(struct niu *np, unsigned long reg,
154                                      u64 bits, int limit, int delay)
155 {
156         while (--limit >= 0) {
157                 u64 val = nr64_ipp(reg);
158
159                 if (!(val & bits))
160                         break;
161                 udelay(delay);
162         }
163         if (limit < 0)
164                 return -ENODEV;
165         return 0;
166 }
167
/* OR @bits into an IPP register and wait for the hardware to clear
 * them.  Unlike the MAC/plain variants this is a read-modify-write --
 * the IPP register's other bits must be preserved.  Logs @reg_name and
 * the stuck value on timeout.  Returns 0 or -ENODEV.
 */
static int __niu_set_and_wait_clear_ipp(struct niu *np, unsigned long reg,
                                        u64 bits, int limit, int delay,
                                        const char *reg_name)
{
        int err;
        u64 val;

        val = nr64_ipp(reg);
        val |= bits;
        nw64_ipp(reg, val);

        err = __niu_wait_bits_clear_ipp(np, reg, bits, limit, delay);
        if (err)
                dev_err(np->device, PFX "%s: bits (%llx) of register %s "
                        "would not clear, val[%llx]\n",
                        np->dev->name, (unsigned long long) bits, reg_name,
                        (unsigned long long) nr64_ipp(reg));
        return err;
}
187
/* Wrapper enforcing compile-time sanity on LIMIT/DELAY. */
#define niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({      BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
        __niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})
192
193 static int __niu_wait_bits_clear(struct niu *np, unsigned long reg,
194                                  u64 bits, int limit, int delay)
195 {
196         while (--limit >= 0) {
197                 u64 val = nr64(reg);
198
199                 if (!(val & bits))
200                         break;
201                 udelay(delay);
202         }
203         if (limit < 0)
204                 return -ENODEV;
205         return 0;
206 }
207
/* Wrapper enforcing compile-time sanity on LIMIT/DELAY. */
#define niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY) \
({      BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
        __niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY); \
})
212
/* Write @bits into a main-block register (plain write) and wait for
 * the hardware to clear them; for self-clearing command/reset bits.
 * Logs @reg_name and the stuck value on timeout.  Returns 0 or -ENODEV.
 */
static int __niu_set_and_wait_clear(struct niu *np, unsigned long reg,
                                    u64 bits, int limit, int delay,
                                    const char *reg_name)
{
        int err;

        nw64(reg, bits);
        err = __niu_wait_bits_clear(np, reg, bits, limit, delay);
        if (err)
                dev_err(np->device, PFX "%s: bits (%llx) of register %s "
                        "would not clear, val[%llx]\n",
                        np->dev->name, (unsigned long long) bits, reg_name,
                        (unsigned long long) nr64(reg));
        return err;
}
228
/* Wrapper enforcing compile-time sanity on LIMIT/DELAY. */
#define niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({      BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
        __niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})
233
234 static void niu_ldg_rearm(struct niu *np, struct niu_ldg *lp, int on)
235 {
236         u64 val = (u64) lp->timer;
237
238         if (on)
239                 val |= LDG_IMGMT_ARM;
240
241         nw64(LDG_IMGMT(lp->ldg_num), val);
242 }
243
244 static int niu_ldn_irq_enable(struct niu *np, int ldn, int on)
245 {
246         unsigned long mask_reg, bits;
247         u64 val;
248
249         if (ldn < 0 || ldn > LDN_MAX)
250                 return -EINVAL;
251
252         if (ldn < 64) {
253                 mask_reg = LD_IM0(ldn);
254                 bits = LD_IM0_MASK;
255         } else {
256                 mask_reg = LD_IM1(ldn - 64);
257                 bits = LD_IM1_MASK;
258         }
259
260         val = nr64(mask_reg);
261         if (on)
262                 val &= ~bits;
263         else
264                 val |= bits;
265         nw64(mask_reg, val);
266
267         return 0;
268 }
269
270 static int niu_enable_ldn_in_ldg(struct niu *np, struct niu_ldg *lp, int on)
271 {
272         struct niu_parent *parent = np->parent;
273         int i;
274
275         for (i = 0; i <= LDN_MAX; i++) {
276                 int err;
277
278                 if (parent->ldg_map[i] != lp->ldg_num)
279                         continue;
280
281                 err = niu_ldn_irq_enable(np, i, on);
282                 if (err)
283                         return err;
284         }
285         return 0;
286 }
287
288 static int niu_enable_interrupts(struct niu *np, int on)
289 {
290         int i;
291
292         for (i = 0; i < np->num_ldg; i++) {
293                 struct niu_ldg *lp = &np->ldg[i];
294                 int err;
295
296                 err = niu_enable_ldn_in_ldg(np, lp, on);
297                 if (err)
298                         return err;
299         }
300         for (i = 0; i < np->num_ldg; i++)
301                 niu_ldg_rearm(np, &np->ldg[i], on);
302
303         return 0;
304 }
305
306 static u32 phy_encode(u32 type, int port)
307 {
308         return (type << (port * 2));
309 }
310
311 static u32 phy_decode(u32 val, int port)
312 {
313         return (val >> (port * 2)) & PORT_TYPE_MASK;
314 }
315
316 static int mdio_wait(struct niu *np)
317 {
318         int limit = 1000;
319         u64 val;
320
321         while (--limit > 0) {
322                 val = nr64(MIF_FRAME_OUTPUT);
323                 if ((val >> MIF_FRAME_OUTPUT_TA_SHIFT) & 0x1)
324                         return val & MIF_FRAME_OUTPUT_DATA;
325
326                 udelay(10);
327         }
328
329         return -ENODEV;
330 }
331
/* Clause-45 style MDIO read: latch the register address with an
 * address frame, then issue the read frame.  Returns the register
 * value (>= 0) or a negative error from mdio_wait().
 */
static int mdio_read(struct niu *np, int port, int dev, int reg)
{
        int err;

        nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
        err = mdio_wait(np);
        if (err < 0)
                return err;

        nw64(MIF_FRAME_OUTPUT, MDIO_READ_OP(port, dev));
        return mdio_wait(np);
}
344
/* Clause-45 style MDIO write: address frame, then write frame.
 * Returns 0 on success or a negative error from mdio_wait().
 */
static int mdio_write(struct niu *np, int port, int dev, int reg, int data)
{
        int err;

        nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
        err = mdio_wait(np);
        if (err < 0)
                return err;

        nw64(MIF_FRAME_OUTPUT, MDIO_WRITE_OP(port, dev, data));
        err = mdio_wait(np);
        if (err < 0)
                return err;

        return 0;
}
361
/* Clause-22 MII read: single read frame, no address phase.
 * Returns the register value (>= 0) or a negative error.
 */
static int mii_read(struct niu *np, int port, int reg)
{
        nw64(MIF_FRAME_OUTPUT, MII_READ_OP(port, reg));
        return mdio_wait(np);
}
367
/* Clause-22 MII write.  Returns 0 on success or a negative error. */
static int mii_write(struct niu *np, int port, int reg, int data)
{
        int err;

        nw64(MIF_FRAME_OUTPUT, MII_WRITE_OP(port, reg, data));
        err = mdio_wait(np);
        if (err < 0)
                return err;

        return 0;
}
379
380 static int esr2_set_tx_cfg(struct niu *np, unsigned long channel, u32 val)
381 {
382         int err;
383
384         err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
385                          ESR2_TI_PLL_TX_CFG_L(channel),
386                          val & 0xffff);
387         if (!err)
388                 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
389                                  ESR2_TI_PLL_TX_CFG_H(channel),
390                                  val >> 16);
391         return err;
392 }
393
394 static int esr2_set_rx_cfg(struct niu *np, unsigned long channel, u32 val)
395 {
396         int err;
397
398         err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
399                          ESR2_TI_PLL_RX_CFG_L(channel),
400                          val & 0xffff);
401         if (!err)
402                 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
403                                  ESR2_TI_PLL_RX_CFG_H(channel),
404                                  val >> 16);
405         return err;
406 }
407
408 /* Mode is always 10G fiber.  */
409 static int serdes_init_niu(struct niu *np)
410 {
411         struct niu_link_config *lp = &np->link_config;
412         u32 tx_cfg, rx_cfg;
413         unsigned long i;
414
415         tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
416         rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
417                   PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
418                   PLL_RX_CFG_EQ_LP_ADAPTIVE);
419
420         if (lp->loopback_mode == LOOPBACK_PHY) {
421                 u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;
422
423                 mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
424                            ESR2_TI_PLL_TEST_CFG_L, test_cfg);
425
426                 tx_cfg |= PLL_TX_CFG_ENTEST;
427                 rx_cfg |= PLL_RX_CFG_ENTEST;
428         }
429
430         /* Initialize all 4 lanes of the SERDES.  */
431         for (i = 0; i < 4; i++) {
432                 int err = esr2_set_tx_cfg(np, i, tx_cfg);
433                 if (err)
434                         return err;
435         }
436
437         for (i = 0; i < 4; i++) {
438                 int err = esr2_set_rx_cfg(np, i, rx_cfg);
439                 if (err)
440                         return err;
441         }
442
443         return 0;
444 }
445
446 static int esr_read_rxtx_ctrl(struct niu *np, unsigned long chan, u32 *val)
447 {
448         int err;
449
450         err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_L(chan));
451         if (err >= 0) {
452                 *val = (err & 0xffff);
453                 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
454                                 ESR_RXTX_CTRL_H(chan));
455                 if (err >= 0)
456                         *val |= ((err & 0xffff) << 16);
457                 err = 0;
458         }
459         return err;
460 }
461
462 static int esr_read_glue0(struct niu *np, unsigned long chan, u32 *val)
463 {
464         int err;
465
466         err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
467                         ESR_GLUE_CTRL0_L(chan));
468         if (err >= 0) {
469                 *val = (err & 0xffff);
470                 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
471                                 ESR_GLUE_CTRL0_H(chan));
472                 if (err >= 0) {
473                         *val |= ((err & 0xffff) << 16);
474                         err = 0;
475                 }
476         }
477         return err;
478 }
479
480 static int esr_read_reset(struct niu *np, u32 *val)
481 {
482         int err;
483
484         err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
485                         ESR_RXTX_RESET_CTRL_L);
486         if (err >= 0) {
487                 *val = (err & 0xffff);
488                 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
489                                 ESR_RXTX_RESET_CTRL_H);
490                 if (err >= 0) {
491                         *val |= ((err & 0xffff) << 16);
492                         err = 0;
493                 }
494         }
495         return err;
496 }
497
498 static int esr_write_rxtx_ctrl(struct niu *np, unsigned long chan, u32 val)
499 {
500         int err;
501
502         err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
503                          ESR_RXTX_CTRL_L(chan), val & 0xffff);
504         if (!err)
505                 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
506                                  ESR_RXTX_CTRL_H(chan), (val >> 16));
507         return err;
508 }
509
510 static int esr_write_glue0(struct niu *np, unsigned long chan, u32 val)
511 {
512         int err;
513
514         err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
515                         ESR_GLUE_CTRL0_L(chan), val & 0xffff);
516         if (!err)
517                 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
518                                  ESR_GLUE_CTRL0_H(chan), (val >> 16));
519         return err;
520 }
521
/* Pulse the SERDES RXTX reset lines and verify they deassert.
 * The low/high halves are toggled in a specific order with 200us
 * settle times between steps -- presumably required by the SERDES
 * hardware, so do not reorder.  Returns 0 on success, a negative MDIO
 * error, or -ENODEV if the reset state never read back as zero.
 */
static int esr_reset(struct niu *np)
{
        u32 reset;
        int err;

        /* Step 1: low half deasserted, high half asserted. */
        err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
                         ESR_RXTX_RESET_CTRL_L, 0x0000);
        if (err)
                return err;
        err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
                         ESR_RXTX_RESET_CTRL_H, 0xffff);
        if (err)
                return err;
        udelay(200);

        /* Step 2: assert the low half as well. */
        err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
                         ESR_RXTX_RESET_CTRL_L, 0xffff);
        if (err)
                return err;
        udelay(200);

        /* Step 3: release the high half; low half release is implied
         * by the hardware (TODO confirm -- low half is not rewritten).
         */
        err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
                         ESR_RXTX_RESET_CTRL_H, 0x0000);
        if (err)
                return err;
        udelay(200);

        /* All reset bits must read back clear. */
        err = esr_read_reset(np, &reset);
        if (err)
                return err;
        if (reset != 0) {
                dev_err(np->device, PFX "Port %u ESR_RESET "
                        "did not clear [%08x]\n",
                        np->port, reset);
                return -ENODEV;
        }

        return 0;
}
561
/* Bring up the 10G SERDES for ports 0/1: program control/test config,
 * condition all four lanes, reset the ESR block, then verify the
 * expected signal-detect/ready bits in ESR_INT_SIGNALS.
 * Returns 0 on success, -EINVAL for an unsupported port, a negative
 * error from the lane helpers, or -ENODEV if signals never appear
 * (unless the PHY is hotpluggable, in which case absence is tolerated
 * and the PRESENT flag is cleared).
 */
static int serdes_init_10g(struct niu *np)
{
        struct niu_link_config *lp = &np->link_config;
        unsigned long ctrl_reg, test_cfg_reg, i;
        u64 ctrl_val, test_cfg_val, sig, mask, val;
        int err;

        /* Only ports 0 and 1 have 10G SERDES register sets here. */
        switch (np->port) {
        case 0:
                ctrl_reg = ENET_SERDES_0_CTRL_CFG;
                test_cfg_reg = ENET_SERDES_0_TEST_CFG;
                break;
        case 1:
                ctrl_reg = ENET_SERDES_1_CTRL_CFG;
                test_cfg_reg = ENET_SERDES_1_TEST_CFG;
                break;

        default:
                return -EINVAL;
        }
        /* Signal detect on all 4 lanes, plus emphasis (0x5) and load
         * adjust (0x1) per lane.
         */
        ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
                    ENET_SERDES_CTRL_SDET_1 |
                    ENET_SERDES_CTRL_SDET_2 |
                    ENET_SERDES_CTRL_SDET_3 |
                    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
                    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
                    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
                    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
                    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
                    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
                    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
                    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
        test_cfg_val = 0;

        /* PHY loopback routes each lane back at the pad. */
        if (lp->loopback_mode == LOOPBACK_PHY) {
                test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
                                  ENET_SERDES_TEST_MD_0_SHIFT) |
                                 (ENET_TEST_MD_PAD_LOOPBACK <<
                                  ENET_SERDES_TEST_MD_1_SHIFT) |
                                 (ENET_TEST_MD_PAD_LOOPBACK <<
                                  ENET_SERDES_TEST_MD_2_SHIFT) |
                                 (ENET_TEST_MD_PAD_LOOPBACK <<
                                  ENET_SERDES_TEST_MD_3_SHIFT));
        }

        nw64(ctrl_reg, ctrl_val);
        nw64(test_cfg_reg, test_cfg_val);

        /* Initialize all 4 lanes of the SERDES.  */
        for (i = 0; i < 4; i++) {
                u32 rxtx_ctrl, glue0;

                err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
                if (err)
                        return err;
                err = esr_read_glue0(np, i, &glue0);
                if (err)
                        return err;

                /* Stretch enable + VMUXLO=2 per lane. */
                rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
                rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
                              (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));

                /* RX LOS enable, sample rate 0xf, threshold count 0xff,
                 * blanking time 300 cycles.
                 */
                glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
                           ESR_GLUE_CTRL0_THCNT |
                           ESR_GLUE_CTRL0_BLTIME);
                glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
                          (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
                          (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
                          (BLTIME_300_CYCLES <<
                           ESR_GLUE_CTRL0_BLTIME_SHIFT));

                err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
                if (err)
                        return err;
                err = esr_write_glue0(np, i, glue0);
                if (err)
                        return err;
        }

        err = esr_reset(np);
        if (err)
                return err;

        /* Expect serdes-ready, signal-detect and per-channel bits for
         * this port.
         */
        sig = nr64(ESR_INT_SIGNALS);
        switch (np->port) {
        case 0:
                mask = ESR_INT_SIGNALS_P0_BITS;
                val = (ESR_INT_SRDY0_P0 |
                       ESR_INT_DET0_P0 |
                       ESR_INT_XSRDY_P0 |
                       ESR_INT_XDP_P0_CH3 |
                       ESR_INT_XDP_P0_CH2 |
                       ESR_INT_XDP_P0_CH1 |
                       ESR_INT_XDP_P0_CH0);
                break;

        case 1:
                mask = ESR_INT_SIGNALS_P1_BITS;
                val = (ESR_INT_SRDY0_P1 |
                       ESR_INT_DET0_P1 |
                       ESR_INT_XSRDY_P1 |
                       ESR_INT_XDP_P1_CH3 |
                       ESR_INT_XDP_P1_CH2 |
                       ESR_INT_XDP_P1_CH1 |
                       ESR_INT_XDP_P1_CH0);
                break;

        default:
                return -EINVAL;
        }

        if ((sig & mask) != val) {
                /* Hot-pluggable PHY that is simply absent: not fatal. */
                if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
                        np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
                        return 0;
                }
                dev_err(np->device, PFX "Port %u signal bits [%08x] are not "
                        "[%08x]\n", np->port, (int) (sig & mask), (int) val);
                return -ENODEV;
        }
        if (np->flags & NIU_FLAGS_HOTPLUG_PHY)
                np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
        return 0;
}
687
/* Configure the SERDES PLL for 1G operation: clear FBDIV2 and set the
 * half-rate bit for this port.  Note the PLL config register read and
 * written is always ENET_SERDES_1_PLL_CFG regardless of port --
 * presumably one shared PLL config register covers all ports' HRATE
 * bits (TODO confirm against the Neptune register manual).
 * Returns 0, or -EINVAL for an out-of-range port.
 */
static int serdes_init_1g(struct niu *np)
{
        u64 val;

        val = nr64(ENET_SERDES_1_PLL_CFG);
        val &= ~ENET_SERDES_PLL_FBDIV2;
        switch (np->port) {
        case 0:
                val |= ENET_SERDES_PLL_HRATE0;
                break;
        case 1:
                val |= ENET_SERDES_PLL_HRATE1;
                break;
        case 2:
                val |= ENET_SERDES_PLL_HRATE2;
                break;
        case 3:
                val |= ENET_SERDES_PLL_HRATE3;
                break;
        default:
                return -EINVAL;
        }
        nw64(ENET_SERDES_1_PLL_CFG, val);

        return 0;
}
714
/* Bring up the SERDES in 1G mode for ports 0/1: assert the port's
 * reset, program PLL/control/test config while in reset, release
 * reset, condition all four lanes, then verify signal-detect/ready
 * bits.  The ordering and delays are hardware-mandated; do not
 * reorder.  Returns 0, -EINVAL for an unsupported port, a lane-helper
 * error, or -ENODEV if the expected signals never appear.
 */
static int serdes_init_1g_serdes(struct niu *np)
{
        struct niu_link_config *lp = &np->link_config;
        unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
        u64 ctrl_val, test_cfg_val, sig, mask, val;
        int err;
        u64 reset_val, val_rd;

        /* 1G mode: all HRATE bits plus FBDIV0. */
        val = ENET_SERDES_PLL_HRATE0 | ENET_SERDES_PLL_HRATE1 |
                ENET_SERDES_PLL_HRATE2 | ENET_SERDES_PLL_HRATE3 |
                ENET_SERDES_PLL_FBDIV0;
        switch (np->port) {
        case 0:
                reset_val =  ENET_SERDES_RESET_0;
                ctrl_reg = ENET_SERDES_0_CTRL_CFG;
                test_cfg_reg = ENET_SERDES_0_TEST_CFG;
                pll_cfg = ENET_SERDES_0_PLL_CFG;
                break;
        case 1:
                reset_val =  ENET_SERDES_RESET_1;
                ctrl_reg = ENET_SERDES_1_CTRL_CFG;
                test_cfg_reg = ENET_SERDES_1_TEST_CFG;
                pll_cfg = ENET_SERDES_1_PLL_CFG;
                break;

        default:
                return -EINVAL;
        }
        /* Signal detect on all lanes, emphasis 0x5, load adjust 0x1. */
        ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
                    ENET_SERDES_CTRL_SDET_1 |
                    ENET_SERDES_CTRL_SDET_2 |
                    ENET_SERDES_CTRL_SDET_3 |
                    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
                    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
                    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
                    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
                    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
                    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
                    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
                    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
        test_cfg_val = 0;

        if (lp->loopback_mode == LOOPBACK_PHY) {
                test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
                                  ENET_SERDES_TEST_MD_0_SHIFT) |
                                 (ENET_TEST_MD_PAD_LOOPBACK <<
                                  ENET_SERDES_TEST_MD_1_SHIFT) |
                                 (ENET_TEST_MD_PAD_LOOPBACK <<
                                  ENET_SERDES_TEST_MD_2_SHIFT) |
                                 (ENET_TEST_MD_PAD_LOOPBACK <<
                                  ENET_SERDES_TEST_MD_3_SHIFT));
        }

        /* Assert this port's reset, configure while held in reset,
         * then write back the reset register with our bit cleared.
         * The 2 s delay lets the SERDES settle after release.
         */
        nw64(ENET_SERDES_RESET, reset_val);
        mdelay(20);
        val_rd = nr64(ENET_SERDES_RESET);
        val_rd &= ~reset_val;
        nw64(pll_cfg, val);
        nw64(ctrl_reg, ctrl_val);
        nw64(test_cfg_reg, test_cfg_val);
        nw64(ENET_SERDES_RESET, val_rd);
        mdelay(2000);

        /* Initialize all 4 lanes of the SERDES.  */
        for (i = 0; i < 4; i++) {
                u32 rxtx_ctrl, glue0;

                err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
                if (err)
                        return err;
                err = esr_read_glue0(np, i, &glue0);
                if (err)
                        return err;

                rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
                rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
                              (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));

                glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
                           ESR_GLUE_CTRL0_THCNT |
                           ESR_GLUE_CTRL0_BLTIME);
                glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
                          (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
                          (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
                          (BLTIME_300_CYCLES <<
                           ESR_GLUE_CTRL0_BLTIME_SHIFT));

                err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
                if (err)
                        return err;
                err = esr_write_glue0(np, i, glue0);
                if (err)
                        return err;
        }


        /* Only serdes-ready and signal-detect matter in 1G mode. */
        sig = nr64(ESR_INT_SIGNALS);
        switch (np->port) {
        case 0:
                val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
                mask = val;
                break;

        case 1:
                val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
                mask = val;
                break;

        default:
                return -EINVAL;
        }

        if ((sig & mask) != val) {
                dev_err(np->device, PFX "Port %u signal bits [%08x] are not "
                        "[%08x]\n", np->port, (int) (sig & mask), (int) val);
                return -ENODEV;
        }

        return 0;
}
835
/* Report link state for a 1G SERDES port by sampling the PCS MII
 * status register.  Updates lp->active_speed/active_duplex under
 * np->lock and stores 1/0 into *@link_up_p.  Always returns 0.
 */
static int link_status_1g_serdes(struct niu *np, int *link_up_p)
{
        struct niu_link_config *lp = &np->link_config;
        int link_up;
        u64 val;
        u16 current_speed;
        unsigned long flags;
        u8 current_duplex;

        link_up = 0;
        current_speed = SPEED_INVALID;
        current_duplex = DUPLEX_INVALID;

        spin_lock_irqsave(&np->lock, flags);

        val = nr64_pcs(PCS_MII_STAT);

        /* 1G SERDES links are always 1000/full when up. */
        if (val & PCS_MII_STAT_LINK_STATUS) {
                link_up = 1;
                current_speed = SPEED_1000;
                current_duplex = DUPLEX_FULL;
        }

        lp->active_speed = current_speed;
        lp->active_duplex = current_duplex;
        spin_unlock_irqrestore(&np->lock, flags);

        *link_up_p = link_up;
        return 0;
}
866
/* Report link state for a 10G SERDES port; falls back to the 1G
 * helper if NIU_FLAGS_10G is not set.  Combines XPCS status with an
 * XMAC_INTER2 fault bit, updates lp->active_* under np->lock, and
 * stores 1/0 into *@link_up_p.  Always returns 0.
 */
static int link_status_10g_serdes(struct niu *np, int *link_up_p)
{
        unsigned long flags;
        struct niu_link_config *lp = &np->link_config;
        int link_up = 0;
        int link_ok = 1;
        u64 val, val2;
        u16 current_speed;
        u8 current_duplex;

        if (!(np->flags & NIU_FLAGS_10G))
                return link_status_1g_serdes(np, link_up_p);

        current_speed = SPEED_INVALID;
        current_duplex = DUPLEX_INVALID;
        spin_lock_irqsave(&np->lock, flags);

        val = nr64_xpcs(XPCS_STATUS(0));
        val2 = nr64_mac(XMAC_INTER2);
        /* NOTE(review): magic bit 0x01000000 presumably indicates a
         * local fault in XMAC_INTER2 -- confirm against the manual.
         */
        if (val2 & 0x01000000)
                link_ok = 0;

        /* 0x1000 is presumably the XPCS lane-aligned/link bit. */
        if ((val & 0x1000ULL) && link_ok) {
                link_up = 1;
                current_speed = SPEED_10000;
                current_duplex = DUPLEX_FULL;
        }
        lp->active_speed = current_speed;
        lp->active_duplex = current_duplex;
        spin_unlock_irqrestore(&np->lock, flags);
        *link_up_p = link_up;
        return 0;
}
900
901 static int link_status_1g_rgmii(struct niu *np, int *link_up_p)
902 {
903         struct niu_link_config *lp = &np->link_config;
904         u16 current_speed, bmsr;
905         unsigned long flags;
906         u8 current_duplex;
907         int err, link_up;
908
909         link_up = 0;
910         current_speed = SPEED_INVALID;
911         current_duplex = DUPLEX_INVALID;
912
913         spin_lock_irqsave(&np->lock, flags);
914
915         err = -EINVAL;
916
917         err = mii_read(np, np->phy_addr, MII_BMSR);
918         if (err < 0)
919                 goto out;
920
921         bmsr = err;
922         if (bmsr & BMSR_LSTATUS) {
923                 u16 adv, lpa, common, estat;
924
925                 err = mii_read(np, np->phy_addr, MII_ADVERTISE);
926                 if (err < 0)
927                         goto out;
928                 adv = err;
929
930                 err = mii_read(np, np->phy_addr, MII_LPA);
931                 if (err < 0)
932                         goto out;
933                 lpa = err;
934
935                 common = adv & lpa;
936
937                 err = mii_read(np, np->phy_addr, MII_ESTATUS);
938                 if (err < 0)
939                         goto out;
940                 estat = err;
941                 link_up = 1;
942                 current_speed = SPEED_1000;
943                 current_duplex = DUPLEX_FULL;
944
945         }
946         lp->active_speed = current_speed;
947         lp->active_duplex = current_duplex;
948         err = 0;
949
950 out:
951         spin_unlock_irqrestore(&np->lock, flags);
952
953         *link_up_p = link_up;
954         return err;
955 }
956
957 static int bcm8704_reset(struct niu *np)
958 {
959         int err, limit;
960
961         err = mdio_read(np, np->phy_addr,
962                         BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
963         if (err < 0)
964                 return err;
965         err |= BMCR_RESET;
966         err = mdio_write(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
967                          MII_BMCR, err);
968         if (err)
969                 return err;
970
971         limit = 1000;
972         while (--limit >= 0) {
973                 err = mdio_read(np, np->phy_addr,
974                                 BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
975                 if (err < 0)
976                         return err;
977                 if (!(err & BMCR_RESET))
978                         break;
979         }
980         if (limit < 0) {
981                 dev_err(np->device, PFX "Port %u PHY will not reset "
982                         "(bmcr=%04x)\n", np->port, (err & 0xffff));
983                 return -ENODEV;
984         }
985         return 0;
986 }
987
988 /* When written, certain PHY registers need to be read back twice
989  * in order for the bits to settle properly.
990  */
991 static int bcm8704_user_dev3_readback(struct niu *np, int reg)
992 {
993         int err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
994         if (err < 0)
995                 return err;
996         err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
997         if (err < 0)
998                 return err;
999         return 0;
1000 }
1001
1002 static int bcm8706_init_user_dev3(struct niu *np)
1003 {
1004         int err;
1005
1006
1007         err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
1008                         BCM8704_USER_OPT_DIGITAL_CTRL);
1009         if (err < 0)
1010                 return err;
1011         err &= ~USER_ODIG_CTRL_GPIOS;
1012         err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
1013         err |=  USER_ODIG_CTRL_RESV2;
1014         err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
1015                          BCM8704_USER_OPT_DIGITAL_CTRL, err);
1016         if (err)
1017                 return err;
1018
1019         mdelay(1000);
1020
1021         return 0;
1022 }
1023
1024 static int bcm8704_init_user_dev3(struct niu *np)
1025 {
1026         int err;
1027
1028         err = mdio_write(np, np->phy_addr,
1029                          BCM8704_USER_DEV3_ADDR, BCM8704_USER_CONTROL,
1030                          (USER_CONTROL_OPTXRST_LVL |
1031                           USER_CONTROL_OPBIASFLT_LVL |
1032                           USER_CONTROL_OBTMPFLT_LVL |
1033                           USER_CONTROL_OPPRFLT_LVL |
1034                           USER_CONTROL_OPTXFLT_LVL |
1035                           USER_CONTROL_OPRXLOS_LVL |
1036                           USER_CONTROL_OPRXFLT_LVL |
1037                           USER_CONTROL_OPTXON_LVL |
1038                           (0x3f << USER_CONTROL_RES1_SHIFT)));
1039         if (err)
1040                 return err;
1041
1042         err = mdio_write(np, np->phy_addr,
1043                          BCM8704_USER_DEV3_ADDR, BCM8704_USER_PMD_TX_CONTROL,
1044                          (USER_PMD_TX_CTL_XFP_CLKEN |
1045                           (1 << USER_PMD_TX_CTL_TX_DAC_TXD_SH) |
1046                           (2 << USER_PMD_TX_CTL_TX_DAC_TXCK_SH) |
1047                           USER_PMD_TX_CTL_TSCK_LPWREN));
1048         if (err)
1049                 return err;
1050
1051         err = bcm8704_user_dev3_readback(np, BCM8704_USER_CONTROL);
1052         if (err)
1053                 return err;
1054         err = bcm8704_user_dev3_readback(np, BCM8704_USER_PMD_TX_CONTROL);
1055         if (err)
1056                 return err;
1057
1058         err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
1059                         BCM8704_USER_OPT_DIGITAL_CTRL);
1060         if (err < 0)
1061                 return err;
1062         err &= ~USER_ODIG_CTRL_GPIOS;
1063         err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
1064         err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
1065                          BCM8704_USER_OPT_DIGITAL_CTRL, err);
1066         if (err)
1067                 return err;
1068
1069         mdelay(1000);
1070
1071         return 0;
1072 }
1073
1074 static int mrvl88x2011_act_led(struct niu *np, int val)
1075 {
1076         int     err;
1077
1078         err  = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
1079                 MRVL88X2011_LED_8_TO_11_CTL);
1080         if (err < 0)
1081                 return err;
1082
1083         err &= ~MRVL88X2011_LED(MRVL88X2011_LED_ACT,MRVL88X2011_LED_CTL_MASK);
1084         err |=  MRVL88X2011_LED(MRVL88X2011_LED_ACT,val);
1085
1086         return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
1087                           MRVL88X2011_LED_8_TO_11_CTL, err);
1088 }
1089
1090 static int mrvl88x2011_led_blink_rate(struct niu *np, int rate)
1091 {
1092         int     err;
1093
1094         err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
1095                         MRVL88X2011_LED_BLINK_CTL);
1096         if (err >= 0) {
1097                 err &= ~MRVL88X2011_LED_BLKRATE_MASK;
1098                 err |= (rate << 4);
1099
1100                 err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
1101                                  MRVL88X2011_LED_BLINK_CTL, err);
1102         }
1103
1104         return err;
1105 }
1106
/* Bring up the Marvell 88X2011 10G transceiver: program the LED
 * behaviour, enable the XFP reference clock, apply the configured
 * loopback mode and finally enable the PMD transmitter.  Returns 0
 * or a negative MDIO error code.
 */
static int xcvr_init_10g_mrvl88x2011(struct niu *np)
{
	int	err;

	/* Set LED functions */
	err = mrvl88x2011_led_blink_rate(np, MRVL88X2011_LED_BLKRATE_134MS);
	if (err)
		return err;

	/* led activity */
	err = mrvl88x2011_act_led(np, MRVL88X2011_LED_CTL_OFF);
	if (err)
		return err;

	/* Enable the XFP reference clock: read-modify-write of the
	 * general control register in device 3.
	 */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			MRVL88X2011_GENERAL_CTL);
	if (err < 0)
		return err;

	err |= MRVL88X2011_ENA_XFPREFCLK;

	err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			 MRVL88X2011_GENERAL_CTL, err);
	if (err < 0)
		return err;

	/* Reflect the requested loopback mode in PMA/PMD control 1. */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			MRVL88X2011_PMA_PMD_CTL_1);
	if (err < 0)
		return err;

	if (np->link_config.loopback_mode == LOOPBACK_MAC)
		err |= MRVL88X2011_LOOPBACK;
	else
		err &= ~MRVL88X2011_LOOPBACK;

	err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			 MRVL88X2011_PMA_PMD_CTL_1, err);
	if (err < 0)
		return err;

	/* Enable PMD  */
	return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			  MRVL88X2011_10G_PMD_TX_DIS, MRVL88X2011_ENA_PMDTX);
}
1152
1153
/* Dump a few BCM870x diagnostic registers and sanity-check the analog
 * status / TX alarm readings for common cabling and optics faults.
 * Returns 0 or a negative MDIO error code; the fault checks only log,
 * they do not fail the call.
 */
static int xcvr_diag_bcm870x(struct niu *np)
{
	u16 analog_stat0, tx_alarm_status;
	int err = 0;

#if 1
	/* Debug-only register dump. */
	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
			MII_STAT1000);
	if (err < 0)
		return err;
	pr_info(PFX "Port %u PMA_PMD(MII_STAT1000) [%04x]\n",
		np->port, err);

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 0x20);
	if (err < 0)
		return err;
	pr_info(PFX "Port %u USER_DEV3(0x20) [%04x]\n",
		np->port, err);

	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			MII_NWAYTEST);
	if (err < 0)
		return err;
	pr_info(PFX "Port %u PHYXS(MII_NWAYTEST) [%04x]\n",
		np->port, err);
#endif

	/* XXX dig this out it might not be so useful XXX */
	/* Each status register is read twice: these latched registers
	 * need a double read-back before the value settles.
	 */
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_ANALOG_STATUS0);
	if (err < 0)
		return err;
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_ANALOG_STATUS0);
	if (err < 0)
		return err;
	analog_stat0 = err;

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_TX_ALARM_STATUS);
	if (err < 0)
		return err;
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_TX_ALARM_STATUS);
	if (err < 0)
		return err;
	tx_alarm_status = err;

	/* The magic constants below are chip-specific status
	 * signatures -- presumably from the BCM870x datasheet;
	 * TODO(review): confirm.
	 */
	if (analog_stat0 != 0x03fc) {
		if ((analog_stat0 == 0x43bc) && (tx_alarm_status != 0)) {
			pr_info(PFX "Port %u cable not connected "
				"or bad cable.\n", np->port);
		} else if (analog_stat0 == 0x639c) {
			pr_info(PFX "Port %u optical module is bad "
				"or missing.\n", np->port);
		}
	}

	return 0;
}
1214
1215 static int xcvr_10g_set_lb_bcm870x(struct niu *np)
1216 {
1217         struct niu_link_config *lp = &np->link_config;
1218         int err;
1219
1220         err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
1221                         MII_BMCR);
1222         if (err < 0)
1223                 return err;
1224
1225         err &= ~BMCR_LOOPBACK;
1226
1227         if (lp->loopback_mode == LOOPBACK_MAC)
1228                 err |= BMCR_LOOPBACK;
1229
1230         err = mdio_write(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
1231                          MII_BMCR, err);
1232         if (err)
1233                 return err;
1234
1235         return 0;
1236 }
1237
1238 static int xcvr_init_10g_bcm8706(struct niu *np)
1239 {
1240         int err = 0;
1241         u64 val;
1242
1243         if ((np->flags & NIU_FLAGS_HOTPLUG_PHY) &&
1244             (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) == 0)
1245                         return err;
1246
1247         val = nr64_mac(XMAC_CONFIG);
1248         val &= ~XMAC_CONFIG_LED_POLARITY;
1249         val |= XMAC_CONFIG_FORCE_LED_ON;
1250         nw64_mac(XMAC_CONFIG, val);
1251
1252         val = nr64(MIF_CONFIG);
1253         val |= MIF_CONFIG_INDIRECT_MODE;
1254         nw64(MIF_CONFIG, val);
1255
1256         err = bcm8704_reset(np);
1257         if (err)
1258                 return err;
1259
1260         err = xcvr_10g_set_lb_bcm870x(np);
1261         if (err)
1262                 return err;
1263
1264         err = bcm8706_init_user_dev3(np);
1265         if (err)
1266                 return err;
1267
1268         err = xcvr_diag_bcm870x(np);
1269         if (err)
1270                 return err;
1271
1272         return 0;
1273 }
1274
/* Initialize the BCM8704 10G transceiver: reset, program the user
 * device-3 registers, set up loopback and run the diagnostics dump.
 */
static int xcvr_init_10g_bcm8704(struct niu *np)
{
	int err;

	err = bcm8704_reset(np);
	if (!err)
		err = bcm8704_init_user_dev3(np);
	if (!err)
		err = xcvr_10g_set_lb_bcm870x(np);
	if (!err)
		err = xcvr_diag_bcm870x(np);

	return err;
}
1297
1298 static int xcvr_init_10g(struct niu *np)
1299 {
1300         int phy_id, err;
1301         u64 val;
1302
1303         val = nr64_mac(XMAC_CONFIG);
1304         val &= ~XMAC_CONFIG_LED_POLARITY;
1305         val |= XMAC_CONFIG_FORCE_LED_ON;
1306         nw64_mac(XMAC_CONFIG, val);
1307
1308         /* XXX shared resource, lock parent XXX */
1309         val = nr64(MIF_CONFIG);
1310         val |= MIF_CONFIG_INDIRECT_MODE;
1311         nw64(MIF_CONFIG, val);
1312
1313         phy_id = phy_decode(np->parent->port_phy, np->port);
1314         phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];
1315
1316         /* handle different phy types */
1317         switch (phy_id & NIU_PHY_ID_MASK) {
1318         case NIU_PHY_ID_MRVL88X2011:
1319                 err = xcvr_init_10g_mrvl88x2011(np);
1320                 break;
1321
1322         default: /* bcom 8704 */
1323                 err = xcvr_init_10g_bcm8704(np);
1324                 break;
1325         }
1326
1327         return 0;
1328 }
1329
1330 static int mii_reset(struct niu *np)
1331 {
1332         int limit, err;
1333
1334         err = mii_write(np, np->phy_addr, MII_BMCR, BMCR_RESET);
1335         if (err)
1336                 return err;
1337
1338         limit = 1000;
1339         while (--limit >= 0) {
1340                 udelay(500);
1341                 err = mii_read(np, np->phy_addr, MII_BMCR);
1342                 if (err < 0)
1343                         return err;
1344                 if (!(err & BMCR_RESET))
1345                         break;
1346         }
1347         if (limit < 0) {
1348                 dev_err(np->device, PFX "Port %u MII would not reset, "
1349                         "bmcr[%04x]\n", np->port, err);
1350                 return -ENODEV;
1351         }
1352
1353         return 0;
1354 }
1355
1356 static int xcvr_init_1g_rgmii(struct niu *np)
1357 {
1358         int err;
1359         u64 val;
1360         u16 bmcr, bmsr, estat;
1361
1362         val = nr64(MIF_CONFIG);
1363         val &= ~MIF_CONFIG_INDIRECT_MODE;
1364         nw64(MIF_CONFIG, val);
1365
1366         err = mii_reset(np);
1367         if (err)
1368                 return err;
1369
1370         err = mii_read(np, np->phy_addr, MII_BMSR);
1371         if (err < 0)
1372                 return err;
1373         bmsr = err;
1374
1375         estat = 0;
1376         if (bmsr & BMSR_ESTATEN) {
1377                 err = mii_read(np, np->phy_addr, MII_ESTATUS);
1378                 if (err < 0)
1379                         return err;
1380                 estat = err;
1381         }
1382
1383         bmcr = 0;
1384         err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
1385         if (err)
1386                 return err;
1387
1388         if (bmsr & BMSR_ESTATEN) {
1389                 u16 ctrl1000 = 0;
1390
1391                 if (estat & ESTATUS_1000_TFULL)
1392                         ctrl1000 |= ADVERTISE_1000FULL;
1393                 err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000);
1394                 if (err)
1395                         return err;
1396         }
1397
1398         bmcr = (BMCR_SPEED1000 | BMCR_FULLDPLX);
1399
1400         err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
1401         if (err)
1402                 return err;
1403
1404         err = mii_read(np, np->phy_addr, MII_BMCR);
1405         if (err < 0)
1406                 return err;
1407         bmcr = mii_read(np, np->phy_addr, MII_BMCR);
1408
1409         err = mii_read(np, np->phy_addr, MII_BMSR);
1410         if (err < 0)
1411                 return err;
1412
1413         return 0;
1414 }
1415
/* Common 1G copper PHY bring-up: reset the PHY, apply the requested
 * loopback mode, advertise the full-duplex modes the PHY supports,
 * and restart autonegotiation.  Returns 0 or a negative MDIO error
 * code.
 */
static int mii_init_common(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u16 bmcr, bmsr, adv, estat;
	int err;

	err = mii_reset(np);
	if (err)
		return err;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		return err;
	bmsr = err;

	/* Extended status exists only if the PHY advertises it. */
	estat = 0;
	if (bmsr & BMSR_ESTATEN) {
		err = mii_read(np, np->phy_addr, MII_ESTATUS);
		if (err < 0)
			return err;
		estat = err;
	}

	/* Clear BMCR first; loopback/speed/autoneg bits are built up
	 * below and written in the final mii_write().
	 */
	bmcr = 0;
	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

	if (lp->loopback_mode == LOOPBACK_MAC) {
		bmcr |= BMCR_LOOPBACK;
		if (lp->active_speed == SPEED_1000)
			bmcr |= BMCR_SPEED1000;
		if (lp->active_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 aux;

		/* External loopback via the BCM5464R auxiliary control
		 * register.
		 */
		aux = (BCM5464R_AUX_CTL_EXT_LB |
		       BCM5464R_AUX_CTL_WRITE_1);
		err = mii_write(np, np->phy_addr, BCM5464R_AUX_CTL, aux);
		if (err)
			return err;
	}

	/* XXX configurable XXX */
	/* XXX for now don't advertise half-duplex or asym pause... XXX */
	adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
	if (bmsr & BMSR_10FULL)
		adv |= ADVERTISE_10FULL;
	if (bmsr & BMSR_100FULL)
		adv |= ADVERTISE_100FULL;
	err = mii_write(np, np->phy_addr, MII_ADVERTISE, adv);
	if (err)
		return err;

	if (bmsr & BMSR_ESTATEN) {
		u16 ctrl1000 = 0;

		if (estat & ESTATUS_1000_TFULL)
			ctrl1000 |= ADVERTISE_1000FULL;
		err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000);
		if (err)
			return err;
	}
	bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);

	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

	/* Final BMCR/BMSR read-back; results are discarded apart from
	 * error checking.
	 */
	err = mii_read(np, np->phy_addr, MII_BMCR);
	if (err < 0)
		return err;
	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		return err;
#if 0
	pr_info(PFX "Port %u after MII init bmcr[%04x] bmsr[%04x]\n",
		np->port, bmcr, bmsr);
#endif

	return 0;
}
1501
1502 static int xcvr_init_1g(struct niu *np)
1503 {
1504         u64 val;
1505
1506         /* XXX shared resource, lock parent XXX */
1507         val = nr64(MIF_CONFIG);
1508         val &= ~MIF_CONFIG_INDIRECT_MODE;
1509         nw64(MIF_CONFIG, val);
1510
1511         return mii_init_common(np);
1512 }
1513
1514 static int niu_xcvr_init(struct niu *np)
1515 {
1516         const struct niu_phy_ops *ops = np->phy_ops;
1517         int err;
1518
1519         err = 0;
1520         if (ops->xcvr_init)
1521                 err = ops->xcvr_init(np);
1522
1523         return err;
1524 }
1525
1526 static int niu_serdes_init(struct niu *np)
1527 {
1528         const struct niu_phy_ops *ops = np->phy_ops;
1529         int err;
1530
1531         err = 0;
1532         if (ops->serdes_init)
1533                 err = ops->serdes_init(np);
1534
1535         return err;
1536 }
1537
1538 static void niu_init_xif(struct niu *);
1539 static void niu_handle_led(struct niu *, int status);
1540
/* Synchronize the netdev carrier state with the freshly polled link
 * state: on an up transition reprogram the XIF and LED and log the
 * active speed/duplex; on a down transition turn the LED off.  No-op
 * when the carrier already matches link_up.
 */
static int niu_link_status_common(struct niu *np, int link_up)
{
	struct niu_link_config *lp = &np->link_config;
	struct net_device *dev = np->dev;
	unsigned long flags;

	if (!netif_carrier_ok(dev) && link_up) {
		niuinfo(LINK, "%s: Link is up at %s, %s duplex\n",
		       dev->name,
		       (lp->active_speed == SPEED_10000 ?
			"10Gb/sec" :
			(lp->active_speed == SPEED_1000 ?
			 "1Gb/sec" :
			 (lp->active_speed == SPEED_100 ?
			  "100Mbit/sec" : "10Mbit/sec"))),
		       (lp->active_duplex == DUPLEX_FULL ?
			"full" : "half"));

		/* XIF/LED registers are shared with the IRQ path. */
		spin_lock_irqsave(&np->lock, flags);
		niu_init_xif(np);
		niu_handle_led(np, 1);
		spin_unlock_irqrestore(&np->lock, flags);

		netif_carrier_on(dev);
	} else if (netif_carrier_ok(dev) && !link_up) {
		niuwarn(LINK, "%s: Link is down\n", dev->name);
		spin_lock_irqsave(&np->lock, flags);
		niu_handle_led(np, 0);
		spin_unlock_irqrestore(&np->lock, flags);
		netif_carrier_off(dev);
	}

	return 0;
}
1575
/* Poll link status on the Marvell 88X2011: the link is up only when
 * the PMA/PMD and PCS status registers both report OK and all four
 * XGXS lanes are aligned.  The activity LED is updated to match.
 * Returns 0 with *link_up_p set, or a negative MDIO error code.
 */
static int link_status_10g_mrvl(struct niu *np, int *link_up_p)
{
	int err, link_up, pma_status, pcs_status;

	link_up = 0;

	/* First read of the latched PMD status is discarded. */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			MRVL88X2011_10G_PMD_STATUS_2);
	if (err < 0)
		goto out;

	/* Check PMA/PMD Register: 1.0001.2 == 1 */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			MRVL88X2011_PMA_PMD_STATUS_1);
	if (err < 0)
		goto out;

	pma_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);

	/* Check PMC Register : 3.0001.2 == 1: read twice */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			MRVL88X2011_PMA_PMD_STATUS_1);
	if (err < 0)
		goto out;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			MRVL88X2011_PMA_PMD_STATUS_1);
	if (err < 0)
		goto out;

	pcs_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);

	/* Check XGXS Register : 4.0018.[0-3,12] */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV4_ADDR,
			MRVL88X2011_10G_XGXS_LANE_STAT);
	if (err < 0)
		goto out;

	if (err == (PHYXS_XGXS_LANE_STAT_ALINGED | PHYXS_XGXS_LANE_STAT_LANE3 |
		    PHYXS_XGXS_LANE_STAT_LANE2 | PHYXS_XGXS_LANE_STAT_LANE1 |
		    PHYXS_XGXS_LANE_STAT_LANE0 | PHYXS_XGXS_LANE_STAT_MAGIC |
		    0x800))
		link_up = (pma_status && pcs_status) ? 1 : 0;

	/* This path only reports 10G full duplex. */
	np->link_config.active_speed = SPEED_10000;
	np->link_config.active_duplex = DUPLEX_FULL;
	err = 0;
out:
	/* LED reflects the polled state even on an MDIO error. */
	mrvl88x2011_act_led(np, (link_up ?
				 MRVL88X2011_LED_CTL_PCS_ACT :
				 MRVL88X2011_LED_CTL_OFF));

	*link_up_p = link_up;
	return err;
}
1631
/* Poll link status on the BCM8706: the link is up only when the PMD
 * receiver detects a signal, the PCS has block lock and all XGXS
 * lanes (plus the pattern-test bit) line up.  For hotplug ports an
 * MDIO error is reported as link-down rather than failure, since the
 * PHY may simply have been removed.
 */
static int link_status_10g_bcm8706(struct niu *np, int *link_up_p)
{
	int err, link_up;
	link_up = 0;

	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
			BCM8704_PMD_RCV_SIGDET);
	if (err < 0)
		goto out;
	if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
		err = 0;
		goto out;
	}

	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
			BCM8704_PCS_10G_R_STATUS);
	if (err < 0)
		goto out;

	if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
		err = 0;
		goto out;
	}

	/* Unlike the 8704 path, PATTEST is part of the expected lane
	 * status signature here.
	 */
	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			BCM8704_PHYXS_XGXS_LANE_STAT);
	if (err < 0)
		goto out;
	if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
		    PHYXS_XGXS_LANE_STAT_MAGIC |
		    PHYXS_XGXS_LANE_STAT_PATTEST |
		    PHYXS_XGXS_LANE_STAT_LANE3 |
		    PHYXS_XGXS_LANE_STAT_LANE2 |
		    PHYXS_XGXS_LANE_STAT_LANE1 |
		    PHYXS_XGXS_LANE_STAT_LANE0)) {
		err = 0;
		np->link_config.active_speed = SPEED_INVALID;
		np->link_config.active_duplex = DUPLEX_INVALID;
		goto out;
	}

	link_up = 1;
	np->link_config.active_speed = SPEED_10000;
	np->link_config.active_duplex = DUPLEX_FULL;
	err = 0;

out:
	*link_up_p = link_up;
	/* Hotplug: swallow MDIO errors, the PHY may be absent. */
	if (np->flags & NIU_FLAGS_HOTPLUG_PHY)
		err = 0;
	return err;
}
1684
/* Poll link status on the BCM8704: the link is up only when the PMD
 * receiver detects a signal, the PCS has block lock and all four XGXS
 * lanes are aligned.  Returns 0 with *link_up_p set, or a negative
 * MDIO error code.
 */
static int link_status_10g_bcom(struct niu *np, int *link_up_p)
{
	int err, link_up;

	link_up = 0;

	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
			BCM8704_PMD_RCV_SIGDET);
	if (err < 0)
		goto out;
	if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
		err = 0;
		goto out;
	}

	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
			BCM8704_PCS_10G_R_STATUS);
	if (err < 0)
		goto out;
	if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
		err = 0;
		goto out;
	}

	/* Note: no PATTEST bit expected here, unlike the 8706 path. */
	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			BCM8704_PHYXS_XGXS_LANE_STAT);
	if (err < 0)
		goto out;

	if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
		    PHYXS_XGXS_LANE_STAT_MAGIC |
		    PHYXS_XGXS_LANE_STAT_LANE3 |
		    PHYXS_XGXS_LANE_STAT_LANE2 |
		    PHYXS_XGXS_LANE_STAT_LANE1 |
		    PHYXS_XGXS_LANE_STAT_LANE0)) {
		err = 0;
		goto out;
	}

	link_up = 1;
	np->link_config.active_speed = SPEED_10000;
	np->link_config.active_duplex = DUPLEX_FULL;
	err = 0;

out:
	*link_up_p = link_up;
	return err;
}
1733
1734 static int link_status_10g(struct niu *np, int *link_up_p)
1735 {
1736         unsigned long flags;
1737         int err = -EINVAL;
1738
1739         spin_lock_irqsave(&np->lock, flags);
1740
1741         if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
1742                 int phy_id;
1743
1744                 phy_id = phy_decode(np->parent->port_phy, np->port);
1745                 phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];
1746
1747                 /* handle different phy types */
1748                 switch (phy_id & NIU_PHY_ID_MASK) {
1749                 case NIU_PHY_ID_MRVL88X2011:
1750                         err = link_status_10g_mrvl(np, link_up_p);
1751                         break;
1752
1753                 default: /* bcom 8704 */
1754                         err = link_status_10g_bcom(np, link_up_p);
1755                         break;
1756                 }
1757         }
1758
1759         spin_unlock_irqrestore(&np->lock, flags);
1760
1761         return err;
1762 }
1763
1764 static int niu_10g_phy_present(struct niu *np)
1765 {
1766         u64 sig, mask, val;
1767
1768         sig = nr64(ESR_INT_SIGNALS);
1769         switch (np->port) {
1770         case 0:
1771                 mask = ESR_INT_SIGNALS_P0_BITS;
1772                 val = (ESR_INT_SRDY0_P0 |
1773                        ESR_INT_DET0_P0 |
1774                        ESR_INT_XSRDY_P0 |
1775                        ESR_INT_XDP_P0_CH3 |
1776                        ESR_INT_XDP_P0_CH2 |
1777                        ESR_INT_XDP_P0_CH1 |
1778                        ESR_INT_XDP_P0_CH0);
1779                 break;
1780
1781         case 1:
1782                 mask = ESR_INT_SIGNALS_P1_BITS;
1783                 val = (ESR_INT_SRDY0_P1 |
1784                        ESR_INT_DET0_P1 |
1785                        ESR_INT_XSRDY_P1 |
1786                        ESR_INT_XDP_P1_CH3 |
1787                        ESR_INT_XDP_P1_CH2 |
1788                        ESR_INT_XDP_P1_CH1 |
1789                        ESR_INT_XDP_P1_CH0);
1790                 break;
1791
1792         default:
1793                 return 0;
1794         }
1795
1796         if ((sig & mask) != val)
1797                 return 0;
1798         return 1;
1799 }
1800
/* Poll link status on a hotplug-capable 10G port.  Detects PHY
 * insertion/removal transitions: on insertion the transceiver is
 * re-initialized (and presence is cleared again if that fails, so the
 * next poll retries); on removal the link is forced down.  When a PHY
 * is present, defers to link_status_10g_bcm8706().
 */
static int link_status_10g_hotplug(struct niu *np, int *link_up_p)
{
	unsigned long flags;
	int err = 0;
	int phy_present;
	int phy_present_prev;

	spin_lock_irqsave(&np->lock, flags);

	if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
		phy_present_prev = (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) ?
			1 : 0;
		phy_present = niu_10g_phy_present(np);
		if (phy_present != phy_present_prev) {
			/* state change */
			if (phy_present) {
				np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
				if (np->phy_ops->xcvr_init)
					err = np->phy_ops->xcvr_init(np);
				if (err) {
					/* debounce */
					np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
				}
			} else {
				np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
				*link_up_p = 0;
				niuwarn(LINK, "%s: Hotplug PHY Removed\n",
					np->dev->name);
			}
		}
		if (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT)
			err = link_status_10g_bcm8706(np, link_up_p);
	}

	spin_unlock_irqrestore(&np->lock, flags);

	return err;
}
1839
1840 static int link_status_1g(struct niu *np, int *link_up_p)
1841 {
1842         struct niu_link_config *lp = &np->link_config;
1843         u16 current_speed, bmsr;
1844         unsigned long flags;
1845         u8 current_duplex;
1846         int err, link_up;
1847
1848         link_up = 0;
1849         current_speed = SPEED_INVALID;
1850         current_duplex = DUPLEX_INVALID;
1851
1852         spin_lock_irqsave(&np->lock, flags);
1853
1854         err = -EINVAL;
1855         if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
1856                 goto out;
1857
1858         err = mii_read(np, np->phy_addr, MII_BMSR);
1859         if (err < 0)
1860                 goto out;
1861
1862         bmsr = err;
1863         if (bmsr & BMSR_LSTATUS) {
1864                 u16 adv, lpa, common, estat;
1865
1866                 err = mii_read(np, np->phy_addr, MII_ADVERTISE);
1867                 if (err < 0)
1868                         goto out;
1869                 adv = err;
1870
1871                 err = mii_read(np, np->phy_addr, MII_LPA);
1872                 if (err < 0)
1873                         goto out;
1874                 lpa = err;
1875
1876                 common = adv & lpa;
1877
1878                 err = mii_read(np, np->phy_addr, MII_ESTATUS);
1879                 if (err < 0)
1880                         goto out;
1881                 estat = err;
1882
1883                 link_up = 1;
1884                 if (estat & (ESTATUS_1000_TFULL | ESTATUS_1000_THALF)) {
1885                         current_speed = SPEED_1000;
1886                         if (estat & ESTATUS_1000_TFULL)
1887                                 current_duplex = DUPLEX_FULL;
1888                         else
1889                                 current_duplex = DUPLEX_HALF;
1890                 } else {
1891                         if (common & ADVERTISE_100BASE4) {
1892                                 current_speed = SPEED_100;
1893                                 current_duplex = DUPLEX_HALF;
1894                         } else if (common & ADVERTISE_100FULL) {
1895                                 current_speed = SPEED_100;
1896                                 current_duplex = DUPLEX_FULL;
1897                         } else if (common & ADVERTISE_100HALF) {
1898                                 current_speed = SPEED_100;
1899                                 current_duplex = DUPLEX_HALF;
1900                         } else if (common & ADVERTISE_10FULL) {
1901                                 current_speed = SPEED_10;
1902                                 current_duplex = DUPLEX_FULL;
1903                         } else if (common & ADVERTISE_10HALF) {
1904                                 current_speed = SPEED_10;
1905                                 current_duplex = DUPLEX_HALF;
1906                         } else
1907                                 link_up = 0;
1908                 }
1909         }
1910         lp->active_speed = current_speed;
1911         lp->active_duplex = current_duplex;
1912         err = 0;
1913
1914 out:
1915         spin_unlock_irqrestore(&np->lock, flags);
1916
1917         *link_up_p = link_up;
1918         return err;
1919 }
1920
1921 static int niu_link_status(struct niu *np, int *link_up_p)
1922 {
1923         const struct niu_phy_ops *ops = np->phy_ops;
1924         int err;
1925
1926         err = 0;
1927         if (ops->link_status)
1928                 err = ops->link_status(np, link_up_p);
1929
1930         return err;
1931 }
1932
1933 static void niu_timer(unsigned long __opaque)
1934 {
1935         struct niu *np = (struct niu *) __opaque;
1936         unsigned long off;
1937         int err, link_up;
1938
1939         err = niu_link_status(np, &link_up);
1940         if (!err)
1941                 niu_link_status_common(np, link_up);
1942
1943         if (netif_carrier_ok(np->dev))
1944                 off = 5 * HZ;
1945         else
1946                 off = 1 * HZ;
1947         np->timer.expires = jiffies + off;
1948
1949         add_timer(&np->timer);
1950 }
1951
/* PHY operation tables.  Each board/transceiver combination provides
 * only the subset of {serdes_init, xcvr_init, link_status} hooks it
 * needs; a NULL hook means that step is a no-op for the hardware.
 */

/* 10G over SERDES: no external transceiver to initialize. */
static const struct niu_phy_ops phy_ops_10g_serdes = {
	.serdes_init		= serdes_init_10g_serdes,
	.link_status		= link_status_10g_serdes,
};

/* 1G RGMII: transceiver init only, SERDES untouched. */
static const struct niu_phy_ops phy_ops_1g_rgmii = {
	.xcvr_init		= xcvr_init_1g_rgmii,
	.link_status		= link_status_1g_rgmii,
};

/* 10G fiber on NIU (Niagara-2 on-chip) boards. */
static const struct niu_phy_ops phy_ops_10g_fiber_niu = {
	.serdes_init		= serdes_init_niu,
	.xcvr_init		= xcvr_init_10g,
	.link_status		= link_status_10g,
};

/* 10G fiber on discrete (PCI) boards. */
static const struct niu_phy_ops phy_ops_10g_fiber = {
	.serdes_init		= serdes_init_10g,
	.xcvr_init		= xcvr_init_10g,
	.link_status		= link_status_10g,
};

/* 10G fiber with a hot-pluggable BCM8706 transceiver. */
static const struct niu_phy_ops phy_ops_10g_fiber_hotplug = {
	.serdes_init		= serdes_init_10g,
	.xcvr_init		= xcvr_init_10g_bcm8706,
	.link_status		= link_status_10g_hotplug,
};

/* 10G copper: no dedicated xcvr_init/link_status support yet. */
static const struct niu_phy_ops phy_ops_10g_copper = {
	.serdes_init		= serdes_init_10g,
	.link_status		= link_status_10g, /* XXX */
};

/* 1G fiber. */
static const struct niu_phy_ops phy_ops_1g_fiber = {
	.serdes_init		= serdes_init_1g,
	.xcvr_init		= xcvr_init_1g,
	.link_status		= link_status_1g,
};

/* 1G copper: MII transceiver, no SERDES init needed. */
static const struct niu_phy_ops phy_ops_1g_copper = {
	.xcvr_init		= xcvr_init_1g,
	.link_status		= link_status_1g,
};
1995
/* Pairs a PHY ops table with the base MDIO address its transceivers
 * start at; a per-port offset is added to phy_addr_base in
 * niu_determine_phy_disposition().
 */
struct niu_phy_template {
	const struct niu_phy_ops	*ops;
	u32				phy_addr_base;
};

static const struct niu_phy_template phy_template_niu = {
	.ops		= &phy_ops_10g_fiber_niu,
	.phy_addr_base	= 16,
};

static const struct niu_phy_template phy_template_10g_fiber = {
	.ops		= &phy_ops_10g_fiber,
	.phy_addr_base	= 8,
};

static const struct niu_phy_template phy_template_10g_fiber_hotplug = {
	.ops		= &phy_ops_10g_fiber_hotplug,
	.phy_addr_base	= 8,
};

static const struct niu_phy_template phy_template_10g_copper = {
	.ops		= &phy_ops_10g_copper,
	.phy_addr_base	= 10,
};

static const struct niu_phy_template phy_template_1g_fiber = {
	.ops		= &phy_ops_1g_fiber,
	.phy_addr_base	= 0,
};

static const struct niu_phy_template phy_template_1g_copper = {
	.ops		= &phy_ops_1g_copper,
	.phy_addr_base	= 0,
};

static const struct niu_phy_template phy_template_1g_rgmii = {
	.ops		= &phy_ops_1g_rgmii,
	.phy_addr_base	= 0,
};

static const struct niu_phy_template phy_template_10g_serdes = {
	.ops		= &phy_ops_10g_serdes,
	.phy_addr_base	= 0,
};
2040
/* ATCA boards: fixed PHY address per NIU port, indexed by port number.
 * Ports 2/3 are the RGMII ports (addresses 11/10); ports 0/1 are
 * SERDES and use address 0.  Read-only lookup table, hence const.
 */
static const int niu_atca_port_num[4] = {
	0, 0,  11, 10
};
2044
/* Bring up the 10G SERDES lanes for port 0 or 1.  If the expected
 * signal-detect/ready bits never appear, fall back to configuring the
 * SERDES for 1G operation instead.
 *
 * Returns 0 on success, -EINVAL for an unsupported port, or -ENODEV
 * when neither 10G nor the 1G fallback comes up.
 */
static int serdes_init_10g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
	u64 ctrl_val, test_cfg_val, sig, mask, val;
	int err;
	u64 reset_val;

	/* Select the per-port register file.
	 * NOTE(review): reset_val is computed but never used below; the
	 * actual reset goes through esr_reset().  Kept for symmetry with
	 * the other serdes_init routines.
	 */
	switch (np->port) {
	case 0:
		reset_val =  ENET_SERDES_RESET_0;
		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
		pll_cfg = ENET_SERDES_0_PLL_CFG;
		break;
	case 1:
		reset_val =  ENET_SERDES_RESET_1;
		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
		pll_cfg = ENET_SERDES_1_PLL_CFG;
		break;

	default:
		return -EINVAL;
	}
	/* Signal detect on all four lanes, plus per-lane emphasis (0x5)
	 * and load adjust (0x1) settings.
	 */
	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
		    ENET_SERDES_CTRL_SDET_1 |
		    ENET_SERDES_CTRL_SDET_2 |
		    ENET_SERDES_CTRL_SDET_3 |
		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
	test_cfg_val = 0;

	/* PHY loopback is implemented as pad loopback on every lane. */
	if (lp->loopback_mode == LOOPBACK_PHY) {
		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_0_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_1_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_2_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_3_SHIFT));
	}

	/* Reset, then program PLL, control and test config. */
	esr_reset(np);
	nw64(pll_cfg, ENET_SERDES_PLL_FBDIV2);
	nw64(ctrl_reg, ctrl_val);
	nw64(test_cfg_reg, test_cfg_val);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		u32 rxtx_ctrl, glue0;

		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
		if (err)
			return err;
		err = esr_read_glue0(np, i, &glue0);
		if (err)
			return err;

		/* Enable stretch mode, VMUXLO = 2. */
		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));

		/* RX loss-of-signal: max sample rate/threshold, 300 cycle
		 * blanking time.
		 */
		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
			   ESR_GLUE_CTRL0_THCNT |
			   ESR_GLUE_CTRL0_BLTIME);
		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
			  (BLTIME_300_CYCLES <<
			   ESR_GLUE_CTRL0_BLTIME_SHIFT));

		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
		if (err)
			return err;
		err = esr_write_glue0(np, i, glue0);
		if (err)
			return err;
	}


	/* Check that the lanes trained: all per-port ready/detect bits
	 * must be set in ESR_INT_SIGNALS.
	 */
	sig = nr64(ESR_INT_SIGNALS);
	switch (np->port) {
	case 0:
		mask = ESR_INT_SIGNALS_P0_BITS;
		val = (ESR_INT_SRDY0_P0 |
		       ESR_INT_DET0_P0 |
		       ESR_INT_XSRDY_P0 |
		       ESR_INT_XDP_P0_CH3 |
		       ESR_INT_XDP_P0_CH2 |
		       ESR_INT_XDP_P0_CH1 |
		       ESR_INT_XDP_P0_CH0);
		break;

	case 1:
		mask = ESR_INT_SIGNALS_P1_BITS;
		val = (ESR_INT_SRDY0_P1 |
		       ESR_INT_DET0_P1 |
		       ESR_INT_XSRDY_P1 |
		       ESR_INT_XDP_P1_CH3 |
		       ESR_INT_XDP_P1_CH2 |
		       ESR_INT_XDP_P1_CH1 |
		       ESR_INT_XDP_P1_CH0);
		break;

	default:
		return -EINVAL;
	}

	/* 10G did not come up: retry the SERDES in 1G mode and, on
	 * success, flip the port over to 1G/PCS operation.
	 * NOTE(review): the inner "int err" shadows the outer one.
	 */
	if ((sig & mask) != val) {
		int err;
		err = serdes_init_1g_serdes(np);
		if (!err) {
			np->flags &= ~NIU_FLAGS_10G;
			np->mac_xcvr = MAC_XCVR_PCS;
		}  else {
			dev_err(np->device, PFX "Port %u 10G/1G SERDES Link Failed \n",
			 np->port);
			return -ENODEV;
		}
	}

	return 0;
}
2176
2177 static int niu_determine_phy_disposition(struct niu *np)
2178 {
2179         struct niu_parent *parent = np->parent;
2180         u8 plat_type = parent->plat_type;
2181         const struct niu_phy_template *tp;
2182         u32 phy_addr_off = 0;
2183
2184         if (plat_type == PLAT_TYPE_NIU) {
2185                 tp = &phy_template_niu;
2186                 phy_addr_off += np->port;
2187         } else {
2188                 switch (np->flags &
2189                         (NIU_FLAGS_10G |
2190                          NIU_FLAGS_FIBER |
2191                          NIU_FLAGS_XCVR_SERDES)) {
2192                 case 0:
2193                         /* 1G copper */
2194                         tp = &phy_template_1g_copper;
2195                         if (plat_type == PLAT_TYPE_VF_P0)
2196                                 phy_addr_off = 10;
2197                         else if (plat_type == PLAT_TYPE_VF_P1)
2198                                 phy_addr_off = 26;
2199
2200                         phy_addr_off += (np->port ^ 0x3);
2201                         break;
2202
2203                 case NIU_FLAGS_10G:
2204                         /* 10G copper */
2205                         tp = &phy_template_1g_copper;
2206                         break;
2207
2208                 case NIU_FLAGS_FIBER:
2209                         /* 1G fiber */
2210                         tp = &phy_template_1g_fiber;
2211                         break;
2212
2213                 case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
2214                         /* 10G fiber */
2215                         tp = &phy_template_10g_fiber;
2216                         if (plat_type == PLAT_TYPE_VF_P0 ||
2217                             plat_type == PLAT_TYPE_VF_P1)
2218                                 phy_addr_off = 8;
2219                         phy_addr_off += np->port;
2220                         if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
2221                                 tp = &phy_template_10g_fiber_hotplug;
2222                                 if (np->port == 0)
2223                                         phy_addr_off = 8;
2224                                 if (np->port == 1)
2225                                         phy_addr_off = 12;
2226                         }
2227                         break;
2228
2229                 case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
2230                 case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
2231                 case NIU_FLAGS_XCVR_SERDES:
2232                         switch(np->port) {
2233                         case 0:
2234                         case 1:
2235                                 tp = &phy_template_10g_serdes;
2236                                 break;
2237                         case 2:
2238                         case 3:
2239                                 tp = &phy_template_1g_rgmii;
2240                                 break;
2241                         default:
2242                                 return -EINVAL;
2243                                 break;
2244                         }
2245                         phy_addr_off = niu_atca_port_num[np->port];
2246                         break;
2247
2248                 default:
2249                         return -EINVAL;
2250                 }
2251         }
2252
2253         np->phy_ops = tp->ops;
2254         np->phy_addr = tp->phy_addr_base + phy_addr_off;
2255
2256         return 0;
2257 }
2258
2259 static int niu_init_link(struct niu *np)
2260 {
2261         struct niu_parent *parent = np->parent;
2262         int err, ignore;
2263
2264         if (parent->plat_type == PLAT_TYPE_NIU) {
2265                 err = niu_xcvr_init(np);
2266                 if (err)
2267                         return err;
2268                 msleep(200);
2269         }
2270         err = niu_serdes_init(np);
2271         if (err)
2272                 return err;
2273         msleep(200);
2274         err = niu_xcvr_init(np);
2275         if (!err)
2276                 niu_link_status(np, &ignore);
2277         return 0;
2278 }
2279
2280 static void niu_set_primary_mac(struct niu *np, unsigned char *addr)
2281 {
2282         u16 reg0 = addr[4] << 8 | addr[5];
2283         u16 reg1 = addr[2] << 8 | addr[3];
2284         u16 reg2 = addr[0] << 8 | addr[1];
2285
2286         if (np->flags & NIU_FLAGS_XMAC) {
2287                 nw64_mac(XMAC_ADDR0, reg0);
2288                 nw64_mac(XMAC_ADDR1, reg1);
2289                 nw64_mac(XMAC_ADDR2, reg2);
2290         } else {
2291                 nw64_mac(BMAC_ADDR0, reg0);
2292                 nw64_mac(BMAC_ADDR1, reg1);
2293                 nw64_mac(BMAC_ADDR2, reg2);
2294         }
2295 }
2296
2297 static int niu_num_alt_addr(struct niu *np)
2298 {
2299         if (np->flags & NIU_FLAGS_XMAC)
2300                 return XMAC_NUM_ALT_ADDR;
2301         else
2302                 return BMAC_NUM_ALT_ADDR;
2303 }
2304
2305 static int niu_set_alt_mac(struct niu *np, int index, unsigned char *addr)
2306 {
2307         u16 reg0 = addr[4] << 8 | addr[5];
2308         u16 reg1 = addr[2] << 8 | addr[3];
2309         u16 reg2 = addr[0] << 8 | addr[1];
2310
2311         if (index >= niu_num_alt_addr(np))
2312                 return -EINVAL;
2313
2314         if (np->flags & NIU_FLAGS_XMAC) {
2315                 nw64_mac(XMAC_ALT_ADDR0(index), reg0);
2316                 nw64_mac(XMAC_ALT_ADDR1(index), reg1);
2317                 nw64_mac(XMAC_ALT_ADDR2(index), reg2);
2318         } else {
2319                 nw64_mac(BMAC_ALT_ADDR0(index), reg0);
2320                 nw64_mac(BMAC_ALT_ADDR1(index), reg1);
2321                 nw64_mac(BMAC_ALT_ADDR2(index), reg2);
2322         }
2323
2324         return 0;
2325 }
2326
2327 static int niu_enable_alt_mac(struct niu *np, int index, int on)
2328 {
2329         unsigned long reg;
2330         u64 val, mask;
2331
2332         if (index >= niu_num_alt_addr(np))
2333                 return -EINVAL;
2334
2335         if (np->flags & NIU_FLAGS_XMAC) {
2336                 reg = XMAC_ADDR_CMPEN;
2337                 mask = 1 << index;
2338         } else {
2339                 reg = BMAC_ADDR_CMPEN;
2340                 mask = 1 << (index + 1);
2341         }
2342
2343         val = nr64_mac(reg);
2344         if (on)
2345                 val |= mask;
2346         else
2347                 val &= ~mask;
2348         nw64_mac(reg, val);
2349
2350         return 0;
2351 }
2352
2353 static void __set_rdc_table_num_hw(struct niu *np, unsigned long reg,
2354                                    int num, int mac_pref)
2355 {
2356         u64 val = nr64_mac(reg);
2357         val &= ~(HOST_INFO_MACRDCTBLN | HOST_INFO_MPR);
2358         val |= num;
2359         if (mac_pref)
2360                 val |= HOST_INFO_MPR;
2361         nw64_mac(reg, val);
2362 }
2363
2364 static int __set_rdc_table_num(struct niu *np,
2365                                int xmac_index, int bmac_index,
2366                                int rdc_table_num, int mac_pref)
2367 {
2368         unsigned long reg;
2369
2370         if (rdc_table_num & ~HOST_INFO_MACRDCTBLN)
2371                 return -EINVAL;
2372         if (np->flags & NIU_FLAGS_XMAC)
2373                 reg = XMAC_HOST_INFO(xmac_index);
2374         else
2375                 reg = BMAC_HOST_INFO(bmac_index);
2376         __set_rdc_table_num_hw(np, reg, rdc_table_num, mac_pref);
2377         return 0;
2378 }
2379
/* RDC table for the primary MAC address: XMAC slot 17 / BMAC slot 0. */
static int niu_set_primary_mac_rdc_table(struct niu *np, int table_num,
					 int mac_pref)
{
	return __set_rdc_table_num(np, 17, 0, table_num, mac_pref);
}
2385
/* RDC table for multicast traffic: XMAC slot 16 / BMAC slot 8. */
static int niu_set_multicast_mac_rdc_table(struct niu *np, int table_num,
					   int mac_pref)
{
	return __set_rdc_table_num(np, 16, 8, table_num, mac_pref);
}
2391
/* RDC table for alternate MAC slot @idx (BMAC alternates start at
 * HOST_INFO slot 1, hence idx + 1).
 */
static int niu_set_alt_mac_rdc_table(struct niu *np, int idx,
				     int table_num, int mac_pref)
{
	if (idx >= niu_num_alt_addr(np))
		return -EINVAL;
	return __set_rdc_table_num(np, idx, idx + 1, table_num, mac_pref);
}
2399
/* Recompute the two parity bits of a VLAN table entry and return the
 * updated value.  PARITY0 gives even parity over the low byte (port
 * 0/1 fields), PARITY1 over the next byte (port 2/3 fields).
 *
 * NOTE(review): the second hweight64() runs on reg_val *after*
 * PARITY0 has been updated; this is only correct if the parity bits
 * lie outside both byte masks (presumably true per niu.h) — keep the
 * statement order as-is.
 */
static u64 vlan_entry_set_parity(u64 reg_val)
{
	u64 port01_mask;
	u64 port23_mask;

	port01_mask = 0x00ff;
	port23_mask = 0xff00;

	if (hweight64(reg_val & port01_mask) & 1)
		reg_val |= ENET_VLAN_TBL_PARITY0;
	else
		reg_val &= ~ENET_VLAN_TBL_PARITY0;

	if (hweight64(reg_val & port23_mask) & 1)
		reg_val |= ENET_VLAN_TBL_PARITY1;
	else
		reg_val &= ~ENET_VLAN_TBL_PARITY1;

	return reg_val;
}
2420
2421 static void vlan_tbl_write(struct niu *np, unsigned long index,
2422                            int port, int vpr, int rdc_table)
2423 {
2424         u64 reg_val = nr64(ENET_VLAN_TBL(index));
2425
2426         reg_val &= ~((ENET_VLAN_TBL_VPR |
2427                       ENET_VLAN_TBL_VLANRDCTBLN) <<
2428                      ENET_VLAN_TBL_SHIFT(port));
2429         if (vpr)
2430                 reg_val |= (ENET_VLAN_TBL_VPR <<
2431                             ENET_VLAN_TBL_SHIFT(port));
2432         reg_val |= (rdc_table << ENET_VLAN_TBL_SHIFT(port));
2433
2434         reg_val = vlan_entry_set_parity(reg_val);
2435
2436         nw64(ENET_VLAN_TBL(index), reg_val);
2437 }
2438
2439 static void vlan_tbl_clear(struct niu *np)
2440 {
2441         int i;
2442
2443         for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++)
2444                 nw64(ENET_VLAN_TBL(i), 0);
2445 }
2446
2447 static int tcam_wait_bit(struct niu *np, u64 bit)
2448 {
2449         int limit = 1000;
2450
2451         while (--limit > 0) {
2452                 if (nr64(TCAM_CTL) & bit)
2453                         break;
2454                 udelay(1);
2455         }
2456         if (limit < 0)
2457                 return -ENODEV;
2458
2459         return 0;
2460 }
2461
/* Invalidate TCAM entry @index by writing an all-zero key with an
 * all-ones first mask word, then wait for the write to complete.
 */
static int tcam_flush(struct niu *np, int index)
{
	nw64(TCAM_KEY_0, 0x00);
	nw64(TCAM_KEY_MASK_0, 0xff);
	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));

	return tcam_wait_bit(np, TCAM_CTL_STAT);
}
2470
#if 0
/* Read TCAM entry @index back into @key[4] / @mask[4].  Currently
 * unused; kept for debugging.
 */
static int tcam_read(struct niu *np, int index,
		     u64 *key, u64 *mask)
{
	int err;

	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_READ | index));
	err = tcam_wait_bit(np, TCAM_CTL_STAT);
	if (!err) {
		key[0] = nr64(TCAM_KEY_0);
		key[1] = nr64(TCAM_KEY_1);
		key[2] = nr64(TCAM_KEY_2);
		key[3] = nr64(TCAM_KEY_3);
		mask[0] = nr64(TCAM_KEY_MASK_0);
		mask[1] = nr64(TCAM_KEY_MASK_1);
		mask[2] = nr64(TCAM_KEY_MASK_2);
		mask[3] = nr64(TCAM_KEY_MASK_3);
	}
	return err;
}
#endif
2492
/* Write a 4-word key/mask pair into TCAM entry @index: stage all
 * eight data registers, kick off the write, and wait for completion.
 */
static int tcam_write(struct niu *np, int index,
		      u64 *key, u64 *mask)
{
	nw64(TCAM_KEY_0, key[0]);
	nw64(TCAM_KEY_1, key[1]);
	nw64(TCAM_KEY_2, key[2]);
	nw64(TCAM_KEY_3, key[3]);
	nw64(TCAM_KEY_MASK_0, mask[0]);
	nw64(TCAM_KEY_MASK_1, mask[1]);
	nw64(TCAM_KEY_MASK_2, mask[2]);
	nw64(TCAM_KEY_MASK_3, mask[3]);
	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));

	return tcam_wait_bit(np, TCAM_CTL_STAT);
}
2508
#if 0
/* Read the associated-data RAM word for TCAM entry @index (the RAM is
 * accessed through the TCAM_KEY_1 staging register).  Currently
 * unused; kept for debugging.
 */
static int tcam_assoc_read(struct niu *np, int index, u64 *data)
{
	int err;

	nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_READ | index));
	err = tcam_wait_bit(np, TCAM_CTL_STAT);
	if (!err)
		*data = nr64(TCAM_KEY_1);

	return err;
}
#endif
2522
/* Write @assoc_data into the associated-data RAM for TCAM entry
 * @index (staged through TCAM_KEY_1, mirroring tcam_assoc_read()).
 */
static int tcam_assoc_write(struct niu *np, int index, u64 assoc_data)
{
	nw64(TCAM_KEY_1, assoc_data);
	nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_WRITE | index));

	return tcam_wait_bit(np, TCAM_CTL_STAT);
}
2530
2531 static void tcam_enable(struct niu *np, int on)
2532 {
2533         u64 val = nr64(FFLP_CFG_1);
2534
2535         if (on)
2536                 val &= ~FFLP_CFG_1_TCAM_DIS;
2537         else
2538                 val |= FFLP_CFG_1_TCAM_DIS;
2539         nw64(FFLP_CFG_1, val);
2540 }
2541
/* Program TCAM access latency and access ratio.  FFLPINITDONE must be
 * clear while the fields change and is re-asserted with a second write
 * afterwards, hence the two-phase update.
 */
static void tcam_set_lat_and_ratio(struct niu *np, u64 latency, u64 ratio)
{
	u64 val = nr64(FFLP_CFG_1);

	val &= ~(FFLP_CFG_1_FFLPINITDONE |
		 FFLP_CFG_1_CAMLAT |
		 FFLP_CFG_1_CAMRATIO);
	val |= (latency << FFLP_CFG_1_CAMLAT_SHIFT);
	val |= (ratio << FFLP_CFG_1_CAMRATIO_SHIFT);
	nw64(FFLP_CFG_1, val);

	/* Signal that FFLP initialization is complete again. */
	val = nr64(FFLP_CFG_1);
	val |= FFLP_CFG_1_FFLPINITDONE;
	nw64(FFLP_CFG_1, val);
}
2557
2558 static int tcam_user_eth_class_enable(struct niu *np, unsigned long class,
2559                                       int on)
2560 {
2561         unsigned long reg;
2562         u64 val;
2563
2564         if (class < CLASS_CODE_ETHERTYPE1 ||
2565             class > CLASS_CODE_ETHERTYPE2)
2566                 return -EINVAL;
2567
2568         reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1);
2569         val = nr64(reg);
2570         if (on)
2571                 val |= L2_CLS_VLD;
2572         else
2573                 val &= ~L2_CLS_VLD;
2574         nw64(reg, val);
2575
2576         return 0;
2577 }
2578
#if 0
/* Program the 16-bit ethertype matched by user class @class.
 * Currently unused; kept for future ethtool plumbing.
 */
static int tcam_user_eth_class_set(struct niu *np, unsigned long class,
				   u64 ether_type)
{
	unsigned long reg;
	u64 val;

	if (class < CLASS_CODE_ETHERTYPE1 ||
	    class > CLASS_CODE_ETHERTYPE2 ||
	    (ether_type & ~(u64)0xffff) != 0)
		return -EINVAL;

	reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1);
	val = nr64(reg);
	val &= ~L2_CLS_ETYPE;
	val |= (ether_type << L2_CLS_ETYPE_SHIFT);
	nw64(reg, val);

	return 0;
}
#endif
2600
2601 static int tcam_user_ip_class_enable(struct niu *np, unsigned long class,
2602                                      int on)
2603 {
2604         unsigned long reg;
2605         u64 val;
2606
2607         if (class < CLASS_CODE_USER_PROG1 ||
2608             class > CLASS_CODE_USER_PROG4)
2609                 return -EINVAL;
2610
2611         reg = L3_CLS(class - CLASS_CODE_USER_PROG1);
2612         val = nr64(reg);
2613         if (on)
2614                 val |= L3_CLS_VALID;
2615         else
2616                 val &= ~L3_CLS_VALID;
2617         nw64(reg, val);
2618
2619         return 0;
2620 }
2621
#if 0
/* Program user IP class @class: IP version, protocol ID, and TOS
 * match value/mask.  Currently unused; kept for future plumbing.
 */
static int tcam_user_ip_class_set(struct niu *np, unsigned long class,
				  int ipv6, u64 protocol_id,
				  u64 tos_mask, u64 tos_val)
{
	unsigned long reg;
	u64 val;

	/* protocol and TOS fields are 8 bits wide. */
	if (class < CLASS_CODE_USER_PROG1 ||
	    class > CLASS_CODE_USER_PROG4 ||
	    (protocol_id & ~(u64)0xff) != 0 ||
	    (tos_mask & ~(u64)0xff) != 0 ||
	    (tos_val & ~(u64)0xff) != 0)
		return -EINVAL;

	reg = L3_CLS(class - CLASS_CODE_USER_PROG1);
	val = nr64(reg);
	val &= ~(L3_CLS_IPVER | L3_CLS_PID |
		 L3_CLS_TOSMASK | L3_CLS_TOS);
	if (ipv6)
		val |= L3_CLS_IPVER;
	val |= (protocol_id << L3_CLS_PID_SHIFT);
	val |= (tos_mask << L3_CLS_TOSMASK_SHIFT);
	val |= (tos_val << L3_CLS_TOS_SHIFT);
	nw64(reg, val);

	return 0;
}
#endif
2651
2652 static int tcam_early_init(struct niu *np)
2653 {
2654         unsigned long i;
2655         int err;
2656
2657         tcam_enable(np, 0);
2658         tcam_set_lat_and_ratio(np,
2659                                DEFAULT_TCAM_LATENCY,
2660                                DEFAULT_TCAM_ACCESS_RATIO);
2661         for (i = CLASS_CODE_ETHERTYPE1; i <= CLASS_CODE_ETHERTYPE2; i++) {
2662                 err = tcam_user_eth_class_enable(np, i, 0);
2663                 if (err)
2664                         return err;
2665         }
2666         for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_USER_PROG4; i++) {
2667                 err = tcam_user_ip_class_enable(np, i, 0);
2668                 if (err)
2669                         return err;
2670         }
2671
2672         return 0;
2673 }
2674
2675 static int tcam_flush_all(struct niu *np)
2676 {
2677         unsigned long i;
2678
2679         for (i = 0; i < np->parent->tcam_num_entries; i++) {
2680                 int err = tcam_flush(np, i);
2681                 if (err)
2682                         return err;
2683         }
2684         return 0;
2685 }
2686
2687 static u64 hash_addr_regval(unsigned long index, unsigned long num_entries)
2688 {
2689         return ((u64)index | (num_entries == 1 ?
2690                               HASH_TBL_ADDR_AUTOINC : 0));
2691 }
2692
#if 0
/* Read @num_entries consecutive hash-table words from @partition
 * starting at @index.  Currently unused; kept for debugging.
 * NOTE(review): the bounds check uses "index + num_entries" while
 * hash_write() uses "index + num_entries * 8" — confirm which units
 * FCRAM_SIZE is expressed in.
 */
static int hash_read(struct niu *np, unsigned long partition,
		     unsigned long index, unsigned long num_entries,
		     u64 *data)
{
	u64 val = hash_addr_regval(index, num_entries);
	unsigned long i;

	if (partition >= FCRAM_NUM_PARTITIONS ||
	    index + num_entries > FCRAM_SIZE)
		return -EINVAL;

	nw64(HASH_TBL_ADDR(partition), val);
	for (i = 0; i < num_entries; i++)
		data[i] = nr64(HASH_TBL_DATA(partition));

	return 0;
}
#endif
2712
2713 static int hash_write(struct niu *np, unsigned long partition,
2714                       unsigned long index, unsigned long num_entries,
2715                       u64 *data)
2716 {
2717         u64 val = hash_addr_regval(index, num_entries);
2718         unsigned long i;
2719
2720         if (partition >= FCRAM_NUM_PARTITIONS ||
2721             index + (num_entries * 8) > FCRAM_SIZE)
2722                 return -EINVAL;
2723
2724         nw64(HASH_TBL_ADDR(partition), val);
2725         for (i = 0; i < num_entries; i++)
2726                 nw64(HASH_TBL_DATA(partition), data[i]);
2727
2728         return 0;
2729 }
2730
/* Hard-reset the FFLP block: pulse the PIO/FIO reset bit for 10us,
 * release it, then restore normal FCRAM output drive and mark the
 * block initialized.
 */
static void fflp_reset(struct niu *np)
{
	u64 val;

	nw64(FFLP_CFG_1, FFLP_CFG_1_PIO_FIO_RST);
	udelay(10);
	nw64(FFLP_CFG_1, 0);

	val = FFLP_CFG_1_FCRAMOUTDR_NORMAL | FFLP_CFG_1_FFLPINITDONE;
	nw64(FFLP_CFG_1, val);
}
2742
/* Program FCRAM access ratio and refresh timers.  As with the TCAM
 * timing update, FFLPINITDONE is cleared during the change and
 * re-asserted with a second write.
 */
static void fflp_set_timings(struct niu *np)
{
	u64 val = nr64(FFLP_CFG_1);

	val &= ~FFLP_CFG_1_FFLPINITDONE;
	val |= (DEFAULT_FCRAMRATIO << FFLP_CFG_1_FCRAMRATIO_SHIFT);
	nw64(FFLP_CFG_1, val);

	val = nr64(FFLP_CFG_1);
	val |= FFLP_CFG_1_FFLPINITDONE;
	nw64(FFLP_CFG_1, val);

	/* Min/max FCRAM refresh intervals. */
	val = nr64(FCRAM_REF_TMR);
	val &= ~(FCRAM_REF_TMR_MAX | FCRAM_REF_TMR_MIN);
	val |= (DEFAULT_FCRAM_REFRESH_MAX << FCRAM_REF_TMR_MAX_SHIFT);
	val |= (DEFAULT_FCRAM_REFRESH_MIN << FCRAM_REF_TMR_MIN_SHIFT);
	nw64(FCRAM_REF_TMR, val);
}
2761
2762 static int fflp_set_partition(struct niu *np, u64 partition,
2763                               u64 mask, u64 base, int enable)
2764 {
2765         unsigned long reg;
2766         u64 val;
2767
2768         if (partition >= FCRAM_NUM_PARTITIONS ||
2769             (mask & ~(u64)0x1f) != 0 ||
2770             (base & ~(u64)0x1f) != 0)
2771                 return -EINVAL;
2772
2773         reg = FLW_PRT_SEL(partition);
2774
2775         val = nr64(reg);
2776         val &= ~(FLW_PRT_SEL_EXT | FLW_PRT_SEL_MASK | FLW_PRT_SEL_BASE);
2777         val |= (mask << FLW_PRT_SEL_MASK_SHIFT);
2778         val |= (base << FLW_PRT_SEL_BASE_SHIFT);
2779         if (enable)
2780                 val |= FLW_PRT_SEL_EXT;
2781         nw64(reg, val);
2782
2783         return 0;
2784 }
2785
2786 static int fflp_disable_all_partitions(struct niu *np)
2787 {
2788         unsigned long i;
2789
2790         for (i = 0; i < FCRAM_NUM_PARTITIONS; i++) {
2791                 int err = fflp_set_partition(np, 0, 0, 0, 0);
2792                 if (err)
2793                         return err;
2794         }
2795         return 0;
2796 }
2797
2798 static void fflp_llcsnap_enable(struct niu *np, int on)
2799 {
2800         u64 val = nr64(FFLP_CFG_1);
2801
2802         if (on)
2803                 val |= FFLP_CFG_1_LLCSNAP;
2804         else
2805                 val &= ~FFLP_CFG_1_LLCSNAP;
2806         nw64(FFLP_CFG_1, val);
2807 }
2808
2809 static void fflp_errors_enable(struct niu *np, int on)
2810 {
2811         u64 val = nr64(FFLP_CFG_1);
2812
2813         if (on)
2814                 val &= ~FFLP_CFG_1_ERRORDIS;
2815         else
2816                 val |= FFLP_CFG_1_ERRORDIS;
2817         nw64(FFLP_CFG_1, val);
2818 }
2819
/* Invalidate the whole FCRAM hash table by writing an empty IPv4
 * entry (valid bit clear, only the EXT header flag set) at every
 * entry-sized offset of partition 0.
 *
 * NOTE(review): each hash_write() call writes a single 64-bit word
 * per sizeof(ent) stride; presumably clearing the header word alone
 * invalidates the entry -- confirm against the FCRAM entry layout.
 *
 * Returns 0 or the first hash_write() error.
 */
static int fflp_hash_clear(struct niu *np)
{
	struct fcram_hash_ipv4 ent;
	unsigned long i;

	/* IPV4 hash entry with valid bit clear, rest is don't care.  */
	memset(&ent, 0, sizeof(ent));
	ent.header = HASH_HEADER_EXT;

	for (i = 0; i < FCRAM_SIZE; i += sizeof(ent)) {
		int err = hash_write(np, 0, i, 1, (u64 *) &ent);
		if (err)
			return err;
	}
	return 0;
}
2836
/* One-time classifier (FFLP/TCAM) hardware initialization, shared
 * by all ports of the same parent device.
 *
 * Runs under the parent lock; PARENT_FLGS_CLS_HWINIT ensures only
 * the first port to get here performs the init.  The FCRAM-related
 * steps (reset, timings, partition disable, hash clear) are skipped
 * when plat_type == PLAT_TYPE_NIU -- presumably that platform has no
 * external FCRAM; TODO confirm.
 *
 * Returns 0 on success or the first failing sub-step's error code.
 */
static int fflp_early_init(struct niu *np)
{
	struct niu_parent *parent;
	unsigned long flags;
	int err;

	niu_lock_parent(np, flags);

	parent = np->parent;
	err = 0;
	if (!(parent->flags & PARENT_FLGS_CLS_HWINIT)) {
		niudbg(PROBE, "fflp_early_init: Initting hw on port %u\n",
		       np->port);
		if (np->parent->plat_type != PLAT_TYPE_NIU) {
			fflp_reset(np);
			fflp_set_timings(np);
			err = fflp_disable_all_partitions(np);
			if (err) {
				niudbg(PROBE, "fflp_disable_all_partitions "
				       "failed, err=%d\n", err);
				goto out;
			}
		}

		err = tcam_early_init(np);
		if (err) {
			niudbg(PROBE, "tcam_early_init failed, err=%d\n",
			       err);
			goto out;
		}
		fflp_llcsnap_enable(np, 1);
		fflp_errors_enable(np, 0);
		/* Zero both hash polynomial registers. */
		nw64(H1POLY, 0);
		nw64(H2POLY, 0);

		err = tcam_flush_all(np);
		if (err) {
			niudbg(PROBE, "tcam_flush_all failed, err=%d\n",
			       err);
			goto out;
		}
		if (np->parent->plat_type != PLAT_TYPE_NIU) {
			err = fflp_hash_clear(np);
			if (err) {
				niudbg(PROBE, "fflp_hash_clear failed, "
				       "err=%d\n", err);
				goto out;
			}
		}

		vlan_tbl_clear(np);

		niudbg(PROBE, "fflp_early_init: Success\n");
		/* Mark done so sibling ports skip the whole sequence. */
		parent->flags |= PARENT_FLGS_CLS_HWINIT;
	}
out:
	niu_unlock_parent(np, flags);
	return err;
}
2896
2897 static int niu_set_flow_key(struct niu *np, unsigned long class_code, u64 key)
2898 {
2899         if (class_code < CLASS_CODE_USER_PROG1 ||
2900             class_code > CLASS_CODE_SCTP_IPV6)
2901                 return -EINVAL;
2902
2903         nw64(FLOW_KEY(class_code - CLASS_CODE_USER_PROG1), key);
2904         return 0;
2905 }
2906
2907 static int niu_set_tcam_key(struct niu *np, unsigned long class_code, u64 key)
2908 {
2909         if (class_code < CLASS_CODE_USER_PROG1 ||
2910             class_code > CLASS_CODE_SCTP_IPV6)
2911                 return -EINVAL;
2912
2913         nw64(TCAM_KEY(class_code - CLASS_CODE_USER_PROG1), key);
2914         return 0;
2915 }
2916
/* Append a page fragment (@offset, @size within @page) to @skb and
 * account the bytes in len, data_len and truesize.
 *
 * NOTE(review): no page reference is taken here -- the caller must
 * already hold one that the skb consumes.
 */
static void niu_rx_skb_append(struct sk_buff *skb, struct page *page,
			      u32 offset, u32 size)
{
	int i = skb_shinfo(skb)->nr_frags;
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	frag->page = page;
	frag->page_offset = offset;
	frag->size = size;

	skb->len += size;
	skb->data_len += size;
	skb->truesize += size;

	skb_shinfo(skb)->nr_frags = i + 1;
}
2933
2934 static unsigned int niu_hash_rxaddr(struct rx_ring_info *rp, u64 a)
2935 {
2936         a >>= PAGE_SHIFT;
2937         a ^= (a >> ilog2(MAX_RBR_RING_SIZE));
2938
2939         return (a & (MAX_RBR_RING_SIZE - 1));
2940 }
2941
/* Look up the receive page whose DMA base matches @addr.
 *
 * rp->rxhash chains pages using page->mapping as a repurposed
 * singly-linked next pointer, with page->index holding the page's
 * DMA base address.  On a hit, *link is set to the slot that points
 * at the found page so the caller can unlink it in O(1).
 *
 * Returns the page or NULL; *link is only meaningful on a hit.
 */
static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr,
				    struct page ***link)
{
	unsigned int h = niu_hash_rxaddr(rp, addr);
	struct page *p, **pp;

	addr &= PAGE_MASK;	/* compare page-aligned bases only */
	pp = &rp->rxhash[h];
	for (; (p = *pp) != NULL; pp = (struct page **) &p->mapping) {
		if (p->index == addr) {
			*link = pp;
			break;
		}
	}

	return p;
}
2959
/* Insert @page at the head of the rxhash chain for DMA base @base.
 * page->index stores the base address; page->mapping is repurposed
 * as the chain's next pointer (see niu_find_rxpage()).
 */
static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base)
{
	unsigned int h = niu_hash_rxaddr(rp, base);

	page->index = base;
	page->mapping = (struct address_space *) rp->rxhash[h];
	rp->rxhash[h] = page;
}
2968
/* Allocate one page, DMA-map it, and fill rbr_blocks_per_page RBR
 * descriptors starting at @start_index, one per rbr_block_size
 * chunk of the page.
 *
 * When a page backs several blocks, the extra page references are
 * taken up front so that each block's eventual __free_page() drops
 * exactly one.
 *
 * NOTE(review): the map_page() return value is not checked for a
 * mapping failure -- verify whether np->ops->map_page can fail on
 * the supported platforms.
 *
 * Returns 0, or -ENOMEM if the page allocation fails.
 */
static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp,
			    gfp_t mask, int start_index)
{
	struct page *page;
	u64 addr;
	int i;

	page = alloc_page(mask);
	if (!page)
		return -ENOMEM;

	addr = np->ops->map_page(np->device, page, 0,
				 PAGE_SIZE, DMA_FROM_DEVICE);

	niu_hash_page(rp, page, addr);
	if (rp->rbr_blocks_per_page > 1)
		atomic_add(rp->rbr_blocks_per_page - 1,
			   &compound_head(page)->_count);

	for (i = 0; i < rp->rbr_blocks_per_page; i++) {
		__le32 *rbr = &rp->rbr[start_index + i];

		/* Descriptor holds the DMA address right-shifted by
		 * RBR_DESCR_ADDR_SHIFT.
		 */
		*rbr = cpu_to_le32(addr >> RBR_DESCR_ADDR_SHIFT);
		addr += rp->rbr_block_size;
	}

	return 0;
}
2997
/* Account one consumed RBR entry and, once a full page's worth is
 * pending, replenish the ring with a fresh page.
 *
 * The RBR_KICK register is only written after rbr_kick_thresh
 * entries have accumulated, batching the MMIO writes.  On page
 * allocation failure the pending count is rolled back so the refill
 * is retried on a later call.
 */
static void niu_rbr_refill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
{
	int index = rp->rbr_index;

	rp->rbr_pending++;
	if ((rp->rbr_pending % rp->rbr_blocks_per_page) == 0) {
		int err = niu_rbr_add_page(np, rp, mask, index);

		if (unlikely(err)) {
			rp->rbr_pending--;
			return;
		}

		rp->rbr_index += rp->rbr_blocks_per_page;
		BUG_ON(rp->rbr_index > rp->rbr_table_size);
		/* wrap back to the start of the descriptor table */
		if (rp->rbr_index == rp->rbr_table_size)
			rp->rbr_index = 0;

		if (rp->rbr_pending >= rp->rbr_kick_thresh) {
			nw64(RBR_KICK(rp->rx_channel), rp->rbr_pending);
			rp->rbr_pending = 0;
		}
	}
}
3022
/* Consume and drop one packet's worth of RCR entries (used when no
 * skb could be allocated).  A buffer page whose last block this
 * entry references is unlinked from the rxhash, DMA-unmapped, freed,
 * and queued for refill.
 *
 * Returns the number of RCR entries consumed and advances
 * rp->rcr_index past them.
 */
static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp)
{
	unsigned int index = rp->rcr_index;
	int num_rcr = 0;

	rp->rx_dropped++;
	while (1) {
		struct page *page, **link;
		u64 addr, val;
		u32 rcr_size;

		num_rcr++;

		val = le64_to_cpup(&rp->rcr[index]);
		addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
			RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
		page = niu_find_rxpage(rp, addr, &link);

		rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
					 RCR_ENTRY_PKTBUFSZ_SHIFT];
		/* Last buffer of the page: release the page entirely. */
		if ((page->index + PAGE_SIZE) - rcr_size == addr) {
			*link = (struct page *) page->mapping;
			np->ops->unmap_page(np->device, page->index,
					    PAGE_SIZE, DMA_FROM_DEVICE);
			page->index = 0;
			page->mapping = NULL;
			__free_page(page);
			rp->rbr_refill_pending++;
		}

		index = NEXT_RCR(rp, index);
		/* MULTI set: the packet continues in the next entry. */
		if (!(val & RCR_ENTRY_MULTI))
			break;

	}
	rp->rcr_index = index;

	return num_rcr;
}
3062
/* Receive one packet from the RCR ring into a new skb.
 *
 * A packet may span several RCR entries (RCR_ENTRY_MULTI); each
 * entry's buffer chunk is attached to the skb as a page fragment.
 * The entry's L2 length and packet type drive the checksum state.
 * A buffer page is unhashed/unmapped and queued for refill once its
 * last chunk is consumed; otherwise an extra page reference is taken
 * for the skb's fragment.
 *
 * Falls back to niu_rx_pkt_ignore() when no skb can be allocated.
 * Returns the number of RCR entries consumed.
 */
static int niu_process_rx_pkt(struct niu *np, struct rx_ring_info *rp)
{
	unsigned int index = rp->rcr_index;
	struct sk_buff *skb;
	int len, num_rcr;

	skb = netdev_alloc_skb(np->dev, RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb))
		return niu_rx_pkt_ignore(np, rp);

	num_rcr = 0;
	while (1) {
		struct page *page, **link;
		u32 rcr_size, append_size;
		u64 addr, val, off;

		num_rcr++;

		val = le64_to_cpup(&rp->rcr[index]);

		len = (val & RCR_ENTRY_L2_LEN) >>
			RCR_ENTRY_L2_LEN_SHIFT;
		len -= ETH_FCS_LEN;	/* strip trailing frame CRC */

		addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
			RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
		page = niu_find_rxpage(rp, addr, &link);

		rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
					 RCR_ENTRY_PKTBUFSZ_SHIFT];

		off = addr & ~PAGE_MASK;
		append_size = rcr_size;
		if (num_rcr == 1) {
			int ptype;

			/* Skip 2 bytes at the head of the first buffer --
			 * presumably a hardware alignment pad; confirm.
			 */
			off += 2;
			append_size -= 2;

			ptype = (val >> RCR_ENTRY_PKT_TYPE_SHIFT);
			/* Trust the hardware checksum only for TCP/UDP
			 * entries without the NOPORT or ERROR flags.
			 */
			if ((ptype == RCR_PKT_TYPE_TCP ||
			     ptype == RCR_PKT_TYPE_UDP) &&
			    !(val & (RCR_ENTRY_NOPORT |
				     RCR_ENTRY_ERROR)))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			else
				skb->ip_summed = CHECKSUM_NONE;
		}
		/* Final chunk: trim to the exact remaining L2 length. */
		if (!(val & RCR_ENTRY_MULTI))
			append_size = len - skb->len;

		niu_rx_skb_append(skb, page, off, append_size);
		/* NOTE(review): niu_rx_pkt_ignore() uses PAGE_SIZE in the
		 * analogous last-buffer test; confirm which is intended.
		 */
		if ((page->index + rp->rbr_block_size) - rcr_size == addr) {
			*link = (struct page *) page->mapping;
			np->ops->unmap_page(np->device, page->index,
					    PAGE_SIZE, DMA_FROM_DEVICE);
			page->index = 0;
			page->mapping = NULL;
			rp->rbr_refill_pending++;
		} else
			get_page(page);

		index = NEXT_RCR(rp, index);
		if (!(val & RCR_ENTRY_MULTI))
			break;

	}
	rp->rcr_index = index;

	skb_reserve(skb, NET_IP_ALIGN);
	/* Pull the frame start into the linear area for header parsing. */
	__pskb_pull_tail(skb, min(len, NIU_RXPULL_MAX));

	rp->rx_packets++;
	rp->rx_bytes += skb->len;

	skb->protocol = eth_type_trans(skb, np->dev);
	netif_receive_skb(skb);

	np->dev->last_rx = jiffies;

	return num_rcr;
}
3145
3146 static int niu_rbr_fill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
3147 {
3148         int blocks_per_page = rp->rbr_blocks_per_page;
3149         int err, index = rp->rbr_index;
3150
3151         err = 0;
3152         while (index < (rp->rbr_table_size - blocks_per_page)) {
3153                 err = niu_rbr_add_page(np, rp, mask, index);
3154                 if (err)
3155                         break;
3156
3157                 index += blocks_per_page;
3158         }
3159
3160         rp->rbr_index = index;
3161         return err;
3162 }
3163
/* Release every receive page still linked into the rxhash chains
 * (DMA-unmap and clear the bookkeeping fields first), then zero the
 * RBR descriptor table and reset the ring index.
 */
static void niu_rbr_free(struct niu *np, struct rx_ring_info *rp)
{
	int i;

	for (i = 0; i < MAX_RBR_RING_SIZE; i++) {
		struct page *page;

		page = rp->rxhash[i];
		while (page) {
			/* page->mapping doubles as the chain's next
			 * pointer; save it before tearing the page down.
			 */
			struct page *next = (struct page *) page->mapping;
			u64 base = page->index;

			np->ops->unmap_page(np->device, base, PAGE_SIZE,
					    DMA_FROM_DEVICE);
			page->index = 0;
			page->mapping = NULL;

			__free_page(page);

			page = next;
		}
	}

	for (i = 0; i < rp->rbr_table_size; i++)
		rp->rbr[i] = cpu_to_le32(0);
	rp->rbr_index = 0;
}
3191
/* Reclaim one transmitted skb starting at descriptor @idx.
 *
 * The driver-prepended TX packet header carries the LEN and PAD
 * fields used to account tx_bytes (LEN minus PAD/2).  The head
 * mapping and one mapping per fragment are unmapped, and the index
 * is advanced over every descriptor the skb occupied (the linear
 * head spans one descriptor per MAX_TX_DESC_LEN bytes).
 *
 * Returns the index of the first descriptor after this packet.
 */
static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx)
{
	struct tx_buff_info *tb = &rp->tx_buffs[idx];
	struct sk_buff *skb = tb->skb;
	struct tx_pkt_hdr *tp;
	u64 tx_flags;
	int i, len;

	tp = (struct tx_pkt_hdr *) skb->data;
	tx_flags = le64_to_cpup(&tp->flags);

	rp->tx_packets++;
	rp->tx_bytes += (((tx_flags & TXHDR_LEN) >> TXHDR_LEN_SHIFT) -
			 ((tx_flags & TXHDR_PAD) / 2));

	len = skb_headlen(skb);
	np->ops->unmap_single(np->device, tb->mapping,
			      len, DMA_TO_DEVICE);

	if (le64_to_cpu(rp->descr[idx]) & TX_DESC_MARK)
		rp->mark_pending--;

	tb->skb = NULL;
	/* Skip all descriptors covering the linear head. */
	do {
		idx = NEXT_TX(rp, idx);
		len -= MAX_TX_DESC_LEN;
	} while (len > 0);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		tb = &rp->tx_buffs[idx];
		BUG_ON(tb->skb != NULL);
		np->ops->unmap_page(np->device, tb->mapping,
				    skb_shinfo(skb)->frags[i].size,
				    DMA_TO_DEVICE);
		idx = NEXT_TX(rp, idx);
	}

	dev_kfree_skb(skb);

	return idx;
}
3233
3234 #define NIU_TX_WAKEUP_THRESH(rp)                ((rp)->pending / 4)
3235
/* Service TX completions for one ring and wake its queue if it was
 * flow-stopped.
 *
 * The number of newly completed packets is derived by masked
 * subtraction of rp->last_pkt_cnt from the PKT_CNT field latched in
 * rp->tx_cs.  The wake condition is re-checked under the tx queue
 * lock to close the race against the xmit path stopping the queue.
 */
static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
{
	struct netdev_queue *txq;
	u16 pkt_cnt, tmp;
	int cons, index;
	u64 cs;

	index = (rp - np->tx_rings);
	txq = netdev_get_tx_queue(np->dev, index);

	cs = rp->tx_cs;
	/* Nothing to do unless a marker interrupt fired. */
	if (unlikely(!(cs & (TX_CS_MK | TX_CS_MMK))))
		goto out;

	tmp = pkt_cnt = (cs & TX_CS_PKT_CNT) >> TX_CS_PKT_CNT_SHIFT;
	pkt_cnt = (pkt_cnt - rp->last_pkt_cnt) &
		(TX_CS_PKT_CNT >> TX_CS_PKT_CNT_SHIFT);

	rp->last_pkt_cnt = tmp;

	cons = rp->cons;

	niudbg(TX_DONE, "%s: niu_tx_work() pkt_cnt[%u] cons[%d]\n",
	       np->dev->name, pkt_cnt, cons);

	while (pkt_cnt--)
		cons = release_tx_packet(np, rp, cons);

	rp->cons = cons;
	smp_mb();	/* publish new cons before testing queue state */

out:
	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
3277
/* Process up to @budget received packets on one RX channel.
 *
 * Status and queue length are read from the channel registers (the
 * mailbox-based alternative is compiled out via "#if 1"); the
 * mailbox copies are cleared either way.  After the packet loop,
 * pages freed during processing are replaced, and the consumed
 * packet/RCR counts are written back to RX_DMA_CTL_STAT with the
 * MEX bit set.
 *
 * Returns the number of packets handed to the stack.
 */
static int niu_rx_work(struct niu *np, struct rx_ring_info *rp, int budget)
{
	int qlen, rcr_done = 0, work_done = 0;
	struct rxdma_mailbox *mbox = rp->mbox;
	u64 stat;

#if 1
	stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
	qlen = nr64(RCRSTAT_A(rp->rx_channel)) & RCRSTAT_A_QLEN;
#else
	stat = le64_to_cpup(&mbox->rx_dma_ctl_stat);
	qlen = (le64_to_cpup(&mbox->rcrstat_a) & RCRSTAT_A_QLEN);
#endif
	mbox->rx_dma_ctl_stat = 0;
	mbox->rcrstat_a = 0;

	niudbg(RX_STATUS, "%s: niu_rx_work(chan[%d]), stat[%llx] qlen=%d\n",
	       np->dev->name, rp->rx_channel, (unsigned long long) stat, qlen);

	rcr_done = work_done = 0;
	qlen = min(qlen, budget);	/* respect the NAPI budget */
	while (work_done < qlen) {
		rcr_done += niu_process_rx_pkt(np, rp);
		work_done++;
	}

	if (rp->rbr_refill_pending >= rp->rbr_kick_thresh) {
		unsigned int i;

		for (i = 0; i < rp->rbr_refill_pending; i++)
			niu_rbr_refill(np, rp, GFP_ATOMIC);
		rp->rbr_refill_pending = 0;
	}

	stat = (RX_DMA_CTL_STAT_MEX |
		((u64)work_done << RX_DMA_CTL_STAT_PKTREAD_SHIFT) |
		((u64)rcr_done << RX_DMA_CTL_STAT_PTRREAD_SHIFT));

	nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat);

	return work_done;
}
3320
/* Core of the NAPI poll: run TX reclaim for every TX channel set in
 * the high 32 bits of the latched v0 vector, then RX work (bounded
 * by @budget) for every RX channel set in the low 32 bits.  Each
 * ring's LD_IM0 register is written to 0 -- unmasking its logical
 * device interrupt -- whether or not the channel had work.
 *
 * Returns the total RX work done; TX reclaim is not budgeted.
 */
static int niu_poll_core(struct niu *np, struct niu_ldg *lp, int budget)
{
	u64 v0 = lp->v0;
	u32 tx_vec = (v0 >> 32);
	u32 rx_vec = (v0 & 0xffffffff);
	int i, work_done = 0;

	niudbg(INTR, "%s: niu_poll_core() v0[%016llx]\n",
	       np->dev->name, (unsigned long long) v0);

	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &np->tx_rings[i];
		if (tx_vec & (1 << rp->tx_channel))
			niu_tx_work(np, rp);
		nw64(LD_IM0(LDN_TXDMA(rp->tx_channel)), 0);
	}

	for (i = 0; i < np->num_rx_rings; i++) {
		struct rx_ring_info *rp = &np->rx_rings[i];

		if (rx_vec & (1 << rp->rx_channel)) {
			int this_work_done;

			this_work_done = niu_rx_work(np, rp,
						     budget);

			/* shrink the budget by what this channel used */
			budget -= this_work_done;
			work_done += this_work_done;
		}
		nw64(LD_IM0(LDN_RXDMA(rp->rx_channel)), 0);
	}

	return work_done;
}
3355
3356 static int niu_poll(struct napi_struct *napi, int budget)
3357 {
3358         struct niu_ldg *lp = container_of(napi, struct niu_ldg, napi);
3359         struct niu *np = lp->np;
3360         int work_done;
3361
3362         work_done = niu_poll_core(np, lp, budget);
3363
3364         if (work_done < budget) {
3365                 netif_rx_complete(np->dev, napi);
3366                 niu_ldg_rearm(np, lp, 1);
3367         }
3368         return work_done;
3369 }
3370
3371 static void niu_log_rxchan_errors(struct niu *np, struct rx_ring_info *rp,
3372                                   u64 stat)
3373 {
3374         dev_err(np->device, PFX "%s: RX channel %u errors ( ",
3375                 np->dev->name, rp->rx_channel);
3376
3377         if (stat & RX_DMA_CTL_STAT_RBR_TMOUT)
3378                 printk("RBR_TMOUT ");
3379         if (stat & RX_DMA_CTL_STAT_RSP_CNT_ERR)
3380                 printk("RSP_CNT ");
3381         if (stat & RX_DMA_CTL_STAT_BYTE_EN_BUS)
3382                 printk("BYTE_EN_BUS ");
3383         if (stat & RX_DMA_CTL_STAT_RSP_DAT_ERR)
3384                 printk("RSP_DAT ");
3385         if (stat & RX_DMA_CTL_STAT_RCR_ACK_ERR)
3386                 printk("RCR_ACK ");
3387         if (stat & RX_DMA_CTL_STAT_RCR_SHA_PAR)
3388                 printk("RCR_SHA_PAR ");
3389         if (stat & RX_DMA_CTL_STAT_RBR_PRE_PAR)
3390                 printk("RBR_PRE_PAR ");
3391         if (stat & RX_DMA_CTL_STAT_CONFIG_ERR)
3392                 printk("CONFIG ");
3393         if (stat & RX_DMA_CTL_STAT_RCRINCON)
3394                 printk("RCRINCON ");
3395         if (stat & RX_DMA_CTL_STAT_RCRFULL)
3396                 printk("RCRFULL ");
3397         if (stat & RX_DMA_CTL_STAT_RBRFULL)
3398                 printk("RBRFULL ");
3399         if (stat & RX_DMA_CTL_STAT_RBRLOGPAGE)
3400                 printk("RBRLOGPAGE ");
3401         if (stat & RX_DMA_CTL_STAT_CFIGLOGPAGE)
3402                 printk("CFIGLOGPAGE ");
3403         if (stat & RX_DMA_CTL_STAT_DC_FIFO_ERR)
3404                 printk("DC_FIDO ");
3405
3406         printk(")\n");
3407 }
3408
/* Read and acknowledge the RX channel error status.
 *
 * Only CHAN_FATAL / PORT_FATAL conditions are treated as errors
 * (logged and returned as -EINVAL); in every case the write-1-clear
 * error bits are acked by writing them back.
 *
 * Returns 0 when the channel may continue running.
 */
static int niu_rx_error(struct niu *np, struct rx_ring_info *rp)
{
	u64 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
	int err = 0;


	if (stat & (RX_DMA_CTL_STAT_CHAN_FATAL |
		    RX_DMA_CTL_STAT_PORT_FATAL))
		err = -EINVAL;

	if (err) {
		dev_err(np->device, PFX "%s: RX channel %u error, stat[%llx]\n",
			np->dev->name, rp->rx_channel,
			(unsigned long long) stat);

		niu_log_rxchan_errors(np, rp, stat);
	}

	nw64(RX_DMA_CTL_STAT(rp->rx_channel),
	     stat & RX_DMA_CTL_WRITE_CLEAR_ERRS);

	return err;
}
3432
3433 static void niu_log_txchan_errors(struct niu *np, struct tx_ring_info *rp,
3434                                   u64 cs)
3435 {
3436         dev_err(np->device, PFX "%s: TX channel %u errors ( ",
3437                 np->dev->name, rp->tx_channel);
3438
3439         if (cs & TX_CS_MBOX_ERR)
3440                 printk("MBOX ");
3441         if (cs & TX_CS_PKT_SIZE_ERR)
3442                 printk("PKT_SIZE ");
3443         if (cs & TX_CS_TX_RING_OFLOW)
3444                 printk("TX_RING_OFLOW ");
3445         if (cs & TX_CS_PREF_BUF_PAR_ERR)
3446                 printk("PREF_BUF_PAR ");
3447         if (cs & TX_CS_NACK_PREF)
3448                 printk("NACK_PREF ");
3449         if (cs & TX_CS_NACK_PKT_RD)
3450                 printk("NACK_PKT_RD ");
3451         if (cs & TX_CS_CONF_PART_ERR)
3452                 printk("CONF_PART ");
3453         if (cs & TX_CS_PKT_PRT_ERR)
3454                 printk("PKT_PTR ");
3455
3456         printk(")\n");
3457 }
3458
3459 static int niu_tx_error(struct niu *np, struct tx_ring_info *rp)
3460 {
3461         u64 cs, logh, logl;
3462
3463         cs = nr64(TX_CS(rp->tx_channel));
3464         logh = nr64(TX_RNG_ERR_LOGH(rp->tx_channel));
3465         logl = nr64(TX_RNG_ERR_LOGL(rp->tx_channel));
3466
3467         dev_err(np->device, PFX "%s: TX channel %u error, "
3468                 "cs[%llx] logh[%llx] logl[%llx]\n",
3469                 np->dev->name, rp->tx_channel,
3470                 (unsigned long long) cs,
3471                 (unsigned long long) logh,
3472                 (unsigned long long) logl);
3473
3474         niu_log_txchan_errors(np, rp, cs);
3475
3476         return -ENODEV;
3477 }
3478
3479 static int niu_mif_interrupt(struct niu *np)
3480 {
3481         u64 mif_status = nr64(MIF_STATUS);
3482         int phy_mdint = 0;
3483
3484         if (np->flags & NIU_FLAGS_XMAC) {
3485                 u64 xrxmac_stat = nr64_mac(XRXMAC_STATUS);
3486
3487                 if (xrxmac_stat & XRXMAC_STATUS_PHY_MDINT)
3488                         phy_mdint = 1;
3489         }
3490
3491         dev_err(np->device, PFX "%s: MIF interrupt, "
3492                 "stat[%llx] phy_mdint(%d)\n",
3493                 np->dev->name, (unsigned long long) mif_status, phy_mdint);
3494
3495         return -ENODEV;
3496 }
3497
3498 static void niu_xmac_interrupt(struct niu *np)
3499 {
3500         struct niu_xmac_stats *mp = &np->mac_stats.xmac;
3501         u64 val;
3502
3503         val = nr64_mac(XTXMAC_STATUS);
3504         if (val & XTXMAC_STATUS_FRAME_CNT_EXP)
3505                 mp->tx_frames += TXMAC_FRM_CNT_COUNT;
3506         if (val & XTXMAC_STATUS_BYTE_CNT_EXP)
3507                 mp->tx_bytes += TXMAC_BYTE_CNT_COUNT;
3508         if (val & XTXMAC_STATUS_TXFIFO_XFR_ERR)
3509                 mp->tx_fifo_errors++;
3510         if (val & XTXMAC_STATUS_TXMAC_OFLOW)
3511                 mp->tx_overflow_errors++;
3512         if (val & XTXMAC_STATUS_MAX_PSIZE_ERR)
3513                 mp->tx_max_pkt_size_errors++;
3514         if (val & XTXMAC_STATUS_TXMAC_UFLOW)
3515                 mp->tx_underflow_errors++;
3516
3517         val = nr64_mac(XRXMAC_STATUS);
3518         if (val & XRXMAC_STATUS_LCL_FLT_STATUS)
3519                 mp->rx_local_faults++;
3520         if (val & XRXMAC_STATUS_RFLT_DET)
3521                 mp->rx_remote_faults++;
3522         if (val & XRXMAC_STATUS_LFLT_CNT_EXP)
3523                 mp->rx_link_faults += LINK_FAULT_CNT_COUNT;
3524         if (val & XRXMAC_STATUS_ALIGNERR_CNT_EXP)
3525                 mp->rx_align_errors += RXMAC_ALIGN_ERR_CNT_COUNT;
3526         if (val & XRXMAC_STATUS_RXFRAG_CNT_EXP)
3527                 mp->rx_frags += RXMAC_FRAG_CNT_COUNT;
3528         if (val & XRXMAC_STATUS_RXMULTF_CNT_EXP)
3529                 mp->rx_mcasts += RXMAC_MC_FRM_CNT_COUNT;
3530         if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP)
3531                 mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT;
3532         if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP)
3533                 mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT;
3534         if (val & XRXMAC_STATUS_RXHIST1_CNT_EXP)
3535                 mp->rx_hist_cnt1 += RXMAC_HIST_CNT1_COUNT;
3536         if (val & XRXMAC_STATUS_RXHIST2_CNT_EXP)
3537                 mp->rx_hist_cnt2 += RXMAC_HIST_CNT2_COUNT;
3538         if (val & XRXMAC_STATUS_RXHIST3_CNT_EXP)
3539                 mp->rx_hist_cnt3 += RXMAC_HIST_CNT3_COUNT;
3540         if (val & XRXMAC_STATUS_RXHIST4_CNT_EXP)
3541                 mp->rx_hist_cnt4 += RXMAC_HIST_CNT4_COUNT;
3542         if (val & XRXMAC_STATUS_RXHIST5_CNT_EXP)
3543                 mp->rx_hist_cnt5 += RXMAC_HIST_CNT5_COUNT;
3544         if (val & XRXMAC_STATUS_RXHIST6_CNT_EXP)
3545                 mp->rx_hist_cnt6 += RXMAC_HIST_CNT6_COUNT;
3546         if (val & XRXMAC_STATUS_RXHIST7_CNT_EXP)
3547                 mp->rx_hist_cnt7 += RXMAC_HIST_CNT7_COUNT;
3548         if (val & XRXMAC_STAT_MSK_RXOCTET_CNT_EXP)
3549                 mp->rx_octets += RXMAC_BT_CNT_COUNT;
3550         if (val & XRXMAC_STATUS_CVIOLERR_CNT_EXP)
3551                 mp->rx_code_violations += RXMAC_CD_VIO_CNT_COUNT;
3552         if (val & XRXMAC_STATUS_LENERR_CNT_EXP)
3553                 mp->rx_len_errors += RXMAC_MPSZER_CNT_COUNT;
3554         if (val & XRXMAC_STATUS_CRCERR_CNT_EXP)
3555                 mp->rx_crc_errors += RXMAC_CRC_ER_CNT_COUNT;
3556         if (val & XRXMAC_STATUS_RXUFLOW)
3557                 mp->rx_underflows++;
3558         if (val & XRXMAC_STATUS_RXOFLOW)
3559                 mp->rx_overflows++;
3560
3561         val = nr64_mac(XMAC_FC_STAT);
3562         if (val & XMAC_FC_STAT_TX_MAC_NPAUSE)
3563                 mp->pause_off_state++;
3564         if (val & XMAC_FC_STAT_TX_MAC_PAUSE)
3565                 mp->pause_on_state++;
3566         if (val & XMAC_FC_STAT_RX_MAC_RPAUSE)
3567                 mp->pause_received++;
3568 }
3569
/* Service a BMAC interrupt: fold the TX, RX and control status
 * registers into the software MAC statistics.  The *_EXP bits are
 * handled by adding the hardware counter-size constants; the rest
 * count single events.
 */
static void niu_bmac_interrupt(struct niu *np)
{
	struct niu_bmac_stats *mp = &np->mac_stats.bmac;
	u64 val;

	val = nr64_mac(BTXMAC_STATUS);
	if (val & BTXMAC_STATUS_UNDERRUN)
		mp->tx_underflow_errors++;
	if (val & BTXMAC_STATUS_MAX_PKT_ERR)
		mp->tx_max_pkt_size_errors++;
	if (val & BTXMAC_STATUS_BYTE_CNT_EXP)
		mp->tx_bytes += BTXMAC_BYTE_CNT_COUNT;
	if (val & BTXMAC_STATUS_FRAME_CNT_EXP)
		mp->tx_frames += BTXMAC_FRM_CNT_COUNT;

	val = nr64_mac(BRXMAC_STATUS);
	if (val & BRXMAC_STATUS_OVERFLOW)
		mp->rx_overflows++;
	if (val & BRXMAC_STATUS_FRAME_CNT_EXP)
		mp->rx_frames += BRXMAC_FRAME_CNT_COUNT;
	if (val & BRXMAC_STATUS_ALIGN_ERR_EXP)
		mp->rx_align_errors += BRXMAC_ALIGN_ERR_CNT_COUNT;
	if (val & BRXMAC_STATUS_CRC_ERR_EXP)
		/* NOTE(review): adds the ALIGN_ERR counter capacity for a
		 * CRC counter wrap -- looks copy-pasted; confirm against
		 * the BMAC register spec.
		 */
		mp->rx_crc_errors += BRXMAC_ALIGN_ERR_CNT_COUNT;
	if (val & BRXMAC_STATUS_LEN_ERR_EXP)
		/* NOTE(review): CODE_VIOL capacity used for the length
		 * error counter -- same concern as above.
		 */
		mp->rx_len_errors += BRXMAC_CODE_VIOL_ERR_CNT_COUNT;

	val = nr64_mac(BMAC_CTRL_STATUS);
	if (val & BMAC_CTRL_STATUS_NOPAUSE)
		mp->pause_off_state++;
	if (val & BMAC_CTRL_STATUS_PAUSE)
		mp->pause_on_state++;
	if (val & BMAC_CTRL_STATUS_PAUSE_RECV)
		mp->pause_received++;
}
3605
3606 static int niu_mac_interrupt(struct niu *np)
3607 {
3608         if (np->flags & NIU_FLAGS_XMAC)
3609                 niu_xmac_interrupt(np);
3610         else
3611                 niu_bmac_interrupt(np);
3612
3613         return 0;
3614 }
3615
3616 static void niu_log_device_error(struct niu *np, u64 stat)
3617 {
3618         dev_err(np->device, PFX "%s: Core device errors ( ",
3619                 np->dev->name);
3620
3621         if (stat & SYS_ERR_MASK_META2)
3622                 printk("META2 ");
3623         if (stat & SYS_ERR_MASK_META1)
3624                 printk("META1 ");
3625         if (stat & SYS_ERR_MASK_PEU)
3626                 printk("PEU ");
3627         if (stat & SYS_ERR_MASK_TXC)
3628                 printk("TXC ");
3629         if (stat & SYS_ERR_MASK_RDMC)
3630                 printk("RDMC ");
3631         if (stat & SYS_ERR_MASK_TDMC)
3632                 printk("TDMC ");
3633         if (stat & SYS_ERR_MASK_ZCP)
3634                 printk("ZCP ");
3635         if (stat & SYS_ERR_MASK_FFLP)
3636                 printk("FFLP ");
3637         if (stat & SYS_ERR_MASK_IPP)
3638                 printk("IPP ");
3639         if (stat & SYS_ERR_MASK_MAC)
3640                 printk("MAC ");
3641         if (stat & SYS_ERR_MASK_SMX)
3642                 printk("SMX ");
3643
3644         printk(")\n");
3645 }
3646
/* Handle a core (system) device error: log the raw SYS_ERR_STAT value
 * and decode which sources are flagged.  Always returns -ENODEV so the
 * caller treats the condition as fatal.
 */
static int niu_device_error(struct niu *np)
{
	u64 stat = nr64(SYS_ERR_STAT);

	dev_err(np->device, PFX "%s: Core device error, stat[%llx]\n",
		np->dev->name, (unsigned long long) stat);

	niu_log_device_error(np, stat);

	return -ENODEV;
}
3658
/* Service the slow-path (error / device) interrupt sources for one
 * logical device group.  Called with np->lock held from niu_interrupt().
 *
 * v0/v1/v2 are the LDSV0/1/2 logical device state vectors already read
 * by the caller; they are stashed in the ldg for later inspection.
 * Returns 0, or the last error seen — in which case all interrupts are
 * disabled to stop an error storm.
 */
static int niu_slowpath_interrupt(struct niu *np, struct niu_ldg *lp,
			      u64 v0, u64 v1, u64 v2)
{

	int i, err = 0;

	lp->v0 = v0;
	lp->v1 = v1;
	lp->v2 = v2;

	/* Low 32 bits of LDSV1: per-channel RXDMA error indications. */
	if (v1 & 0x00000000ffffffffULL) {
		u32 rx_vec = (v1 & 0xffffffff);

		for (i = 0; i < np->num_rx_rings; i++) {
			struct rx_ring_info *rp = &np->rx_rings[i];

			if (rx_vec & (1 << rp->rx_channel)) {
				int r = niu_rx_error(np, rp);
				if (r) {
					err = r;
				} else {
					/* Non-fatal: re-arm mailbox updates
					 * when no fast-path work is pending.
					 */
					if (!v0)
						nw64(RX_DMA_CTL_STAT(rp->rx_channel),
						     RX_DMA_CTL_STAT_MEX);
				}
			}
		}
	}
	/* Bits 32-62 of LDSV1: per-channel TXDMA error indications. */
	if (v1 & 0x7fffffff00000000ULL) {
		u32 tx_vec = (v1 >> 32) & 0x7fffffff;

		for (i = 0; i < np->num_tx_rings; i++) {
			struct tx_ring_info *rp = &np->tx_rings[i];

			if (tx_vec & (1 << rp->tx_channel)) {
				int r = niu_tx_error(np, rp);
				if (r)
					err = r;
			}
		}
	}
	/* Top bit of either vector: MIF interrupt. */
	if ((v0 | v1) & 0x8000000000000000ULL) {
		int r = niu_mif_interrupt(np);
		if (r)
			err = r;
	}
	/* LDSV2 carries the MAC and system-error logical devices. */
	if (v2) {
		if (v2 & 0x01ef) {
			int r = niu_mac_interrupt(np);
			if (r)
				err = r;
		}
		if (v2 & 0x0210) {
			int r = niu_device_error(np);
			if (r)
				err = r;
		}
	}

	if (err)
		niu_enable_interrupts(np, 0);

	return err;
}
3723
3724 static void niu_rxchan_intr(struct niu *np, struct rx_ring_info *rp,
3725                             int ldn)
3726 {
3727         struct rxdma_mailbox *mbox = rp->mbox;
3728         u64 stat_write, stat = le64_to_cpup(&mbox->rx_dma_ctl_stat);
3729
3730         stat_write = (RX_DMA_CTL_STAT_RCRTHRES |
3731                       RX_DMA_CTL_STAT_RCRTO);
3732         nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat_write);
3733
3734         niudbg(INTR, "%s: rxchan_intr stat[%llx]\n",
3735                np->dev->name, (unsigned long long) stat);
3736 }
3737
3738 static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp,
3739                             int ldn)
3740 {
3741         rp->tx_cs = nr64(TX_CS(rp->tx_channel));
3742
3743         niudbg(INTR, "%s: txchan_intr cs[%llx]\n",
3744                np->dev->name, (unsigned long long) rp->tx_cs);
3745 }
3746
3747 static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0)
3748 {
3749         struct niu_parent *parent = np->parent;
3750         u32 rx_vec, tx_vec;
3751         int i;
3752
3753         tx_vec = (v0 >> 32);
3754         rx_vec = (v0 & 0xffffffff);
3755
3756         for (i = 0; i < np->num_rx_rings; i++) {
3757                 struct rx_ring_info *rp = &np->rx_rings[i];
3758                 int ldn = LDN_RXDMA(rp->rx_channel);
3759
3760                 if (parent->ldg_map[ldn] != ldg)
3761                         continue;
3762
3763                 nw64(LD_IM0(ldn), LD_IM0_MASK);
3764                 if (rx_vec & (1 << rp->rx_channel))
3765                         niu_rxchan_intr(np, rp, ldn);
3766         }
3767
3768         for (i = 0; i < np->num_tx_rings; i++) {
3769                 struct tx_ring_info *rp = &np->tx_rings[i];
3770                 int ldn = LDN_TXDMA(rp->tx_channel);
3771
3772                 if (parent->ldg_map[ldn] != ldg)
3773                         continue;
3774
3775                 nw64(LD_IM0(ldn), LD_IM0_MASK);
3776                 if (tx_vec & (1 << rp->tx_channel))
3777                         niu_txchan_intr(np, rp, ldn);
3778         }
3779 }
3780
/* Try to enter NAPI polling for this logical device group.  When the
 * poll is not already scheduled: stash the state vectors for the poll
 * routine, mask this group's fastpath sources at the device, then
 * schedule the softirq.  The ordering here matters.
 */
static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp,
			      u64 v0, u64 v1, u64 v2)
{
	if (likely(netif_rx_schedule_prep(np->dev, &lp->napi))) {
		lp->v0 = v0;
		lp->v1 = v1;
		lp->v2 = v2;
		/* Mask the per-channel interrupts before polling starts. */
		__niu_fastpath_interrupt(np, lp->ldg_num, v0);
		__netif_rx_schedule(np->dev, &lp->napi);
	}
}
3792
/* Top-level interrupt handler; one instance per logical device group.
 * Reads the three logical device state vectors and returns IRQ_NONE
 * when all are clear (shared-IRQ spurious case).  MIF and any v1/v2
 * bits take the slow (error) path; remaining v0 bits schedule NAPI,
 * otherwise the group is simply re-armed.
 */
static irqreturn_t niu_interrupt(int irq, void *dev_id)
{
	struct niu_ldg *lp = dev_id;
	struct niu *np = lp->np;
	int ldg = lp->ldg_num;
	unsigned long flags;
	u64 v0, v1, v2;

	if (netif_msg_intr(np))
		printk(KERN_DEBUG PFX "niu_interrupt() ldg[%p](%d) ",
		       lp, ldg);

	spin_lock_irqsave(&np->lock, flags);

	v0 = nr64(LDSV0(ldg));
	v1 = nr64(LDSV1(ldg));
	v2 = nr64(LDSV2(ldg));

	if (netif_msg_intr(np))
		printk("v0[%llx] v1[%llx] v2[%llx]\n",
		       (unsigned long long) v0,
		       (unsigned long long) v1,
		       (unsigned long long) v2);

	/* Nothing pending: not our interrupt. */
	if (unlikely(!v0 && !v1 && !v2)) {
		spin_unlock_irqrestore(&np->lock, flags);
		return IRQ_NONE;
	}

	/* Error/device sources first; a hard failure skips NAPI. */
	if (unlikely((v0 & ((u64)1 << LDN_MIF)) || v1 || v2)) {
		int err = niu_slowpath_interrupt(np, lp, v0, v1, v2);
		if (err)
			goto out;
	}
	if (likely(v0 & ~((u64)1 << LDN_MIF)))
		niu_schedule_napi(np, lp, v0, v1, v2);
	else
		niu_ldg_rearm(np, lp, 1);
out:
	spin_unlock_irqrestore(&np->lock, flags);

	return IRQ_HANDLED;
}
3836
3837 static void niu_free_rx_ring_info(struct niu *np, struct rx_ring_info *rp)
3838 {
3839         if (rp->mbox) {
3840                 np->ops->free_coherent(np->device,
3841                                        sizeof(struct rxdma_mailbox),
3842                                        rp->mbox, rp->mbox_dma);
3843                 rp->mbox = NULL;
3844         }
3845         if (rp->rcr) {
3846                 np->ops->free_coherent(np->device,
3847                                        MAX_RCR_RING_SIZE * sizeof(__le64),
3848                                        rp->rcr, rp->rcr_dma);
3849                 rp->rcr = NULL;
3850                 rp->rcr_table_size = 0;
3851                 rp->rcr_index = 0;
3852         }
3853         if (rp->rbr) {
3854                 niu_rbr_free(np, rp);
3855
3856                 np->ops->free_coherent(np->device,
3857                                        MAX_RBR_RING_SIZE * sizeof(__le32),
3858                                        rp->rbr, rp->rbr_dma);
3859                 rp->rbr = NULL;
3860                 rp->rbr_table_size = 0;
3861                 rp->rbr_index = 0;
3862         }
3863         kfree(rp->rxhash);
3864         rp->rxhash = NULL;
3865 }
3866
3867 static void niu_free_tx_ring_info(struct niu *np, struct tx_ring_info *rp)
3868 {
3869         if (rp->mbox) {
3870                 np->ops->free_coherent(np->device,
3871                                        sizeof(struct txdma_mailbox),
3872                                        rp->mbox, rp->mbox_dma);
3873                 rp->mbox = NULL;
3874         }
3875         if (rp->descr) {
3876                 int i;
3877
3878                 for (i = 0; i < MAX_TX_RING_SIZE; i++) {
3879                         if (rp->tx_buffs[i].skb)
3880                                 (void) release_tx_packet(np, rp, i);
3881                 }
3882
3883                 np->ops->free_coherent(np->device,
3884                                        MAX_TX_RING_SIZE * sizeof(__le64),
3885                                        rp->descr, rp->descr_dma);
3886                 rp->descr = NULL;
3887                 rp->pending = 0;
3888                 rp->prod = 0;
3889                 rp->cons = 0;
3890                 rp->wrap_bit = 0;
3891         }
3892 }
3893
3894 static void niu_free_channels(struct niu *np)
3895 {
3896         int i;
3897
3898         if (np->rx_rings) {
3899                 for (i = 0; i < np->num_rx_rings; i++) {
3900                         struct rx_ring_info *rp = &np->rx_rings[i];
3901
3902                         niu_free_rx_ring_info(np, rp);
3903                 }
3904                 kfree(np->rx_rings);
3905                 np->rx_rings = NULL;
3906                 np->num_rx_rings = 0;
3907         }
3908
3909         if (np->tx_rings) {
3910                 for (i = 0; i < np->num_tx_rings; i++) {
3911                         struct tx_ring_info *rp = &np->tx_rings[i];
3912
3913                         niu_free_tx_ring_info(np, rp);
3914                 }
3915                 kfree(np->tx_rings);
3916                 np->tx_rings = NULL;
3917                 np->num_tx_rings = 0;
3918         }
3919 }
3920
3921 static int niu_alloc_rx_ring_info(struct niu *np,
3922                                   struct rx_ring_info *rp)
3923 {
3924         BUILD_BUG_ON(sizeof(struct rxdma_mailbox) != 64);
3925
3926         rp->rxhash = kzalloc(MAX_RBR_RING_SIZE * sizeof(struct page *),
3927                              GFP_KERNEL);
3928         if (!rp->rxhash)
3929                 return -ENOMEM;
3930
3931         rp->mbox = np->ops->alloc_coherent(np->device,
3932                                            sizeof(struct rxdma_mailbox),
3933                                            &rp->mbox_dma, GFP_KERNEL);
3934         if (!rp->mbox)
3935                 return -ENOMEM;
3936         if ((unsigned long)rp->mbox & (64UL - 1)) {
3937                 dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
3938                         "RXDMA mailbox %p\n", np->dev->name, rp->mbox);
3939                 return -EINVAL;
3940         }
3941
3942         rp->rcr = np->ops->alloc_coherent(np->device,
3943                                           MAX_RCR_RING_SIZE * sizeof(__le64),
3944                                           &rp->rcr_dma, GFP_KERNEL);
3945         if (!rp->rcr)
3946                 return -ENOMEM;
3947         if ((unsigned long)rp->rcr & (64UL - 1)) {
3948                 dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
3949                         "RXDMA RCR table %p\n", np->dev->name, rp->rcr);
3950                 return -EINVAL;
3951         }
3952         rp->rcr_table_size = MAX_RCR_RING_SIZE;
3953         rp->rcr_index = 0;
3954
3955         rp->rbr = np->ops->alloc_coherent(np->device,
3956                                           MAX_RBR_RING_SIZE * sizeof(__le32),
3957                                           &rp->rbr_dma, GFP_KERNEL);
3958         if (!rp->rbr)
3959                 return -ENOMEM;
3960         if ((unsigned long)rp->rbr & (64UL - 1)) {
3961                 dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
3962                         "RXDMA RBR table %p\n", np->dev->name, rp->rbr);
3963                 return -EINVAL;
3964         }
3965         rp->rbr_table_size = MAX_RBR_RING_SIZE;
3966         rp->rbr_index = 0;
3967         rp->rbr_pending = 0;
3968
3969         return 0;
3970 }
3971
3972 static void niu_set_max_burst(struct niu *np, struct tx_ring_info *rp)
3973 {
3974         int mtu = np->dev->mtu;
3975
3976         /* These values are recommended by the HW designers for fair
3977          * utilization of DRR amongst the rings.
3978          */
3979         rp->max_burst = mtu + 32;
3980         if (rp->max_burst > 4096)
3981                 rp->max_burst = 4096;
3982 }
3983
3984 static int niu_alloc_tx_ring_info(struct niu *np,
3985                                   struct tx_ring_info *rp)
3986 {
3987         BUILD_BUG_ON(sizeof(struct txdma_mailbox) != 64);
3988
3989         rp->mbox = np->ops->alloc_coherent(np->device,
3990                                            sizeof(struct txdma_mailbox),
3991                                            &rp->mbox_dma, GFP_KERNEL);
3992         if (!rp->mbox)
3993                 return -ENOMEM;
3994         if ((unsigned long)rp->mbox & (64UL - 1)) {
3995                 dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
3996                         "TXDMA mailbox %p\n", np->dev->name, rp->mbox);
3997                 return -EINVAL;
3998         }
3999
4000         rp->descr = np->ops->alloc_coherent(np->device,
4001                                             MAX_TX_RING_SIZE * sizeof(__le64),
4002                                             &rp->descr_dma, GFP_KERNEL);
4003         if (!rp->descr)
4004                 return -ENOMEM;
4005         if ((unsigned long)rp->descr & (64UL - 1)) {
4006                 dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
4007                         "TXDMA descr table %p\n", np->dev->name, rp->descr);
4008                 return -EINVAL;
4009         }
4010
4011         rp->pending = MAX_TX_RING_SIZE;
4012         rp->prod = 0;
4013         rp->cons = 0;
4014         rp->wrap_bit = 0;
4015
4016         /* XXX make these configurable... XXX */
4017         rp->mark_freq = rp->pending / 4;
4018
4019         niu_set_max_burst(np, rp);
4020
4021         return 0;
4022 }
4023
4024 static void niu_size_rbr(struct niu *np, struct rx_ring_info *rp)
4025 {
4026         u16 bss;
4027
4028         bss = min(PAGE_SHIFT, 15);
4029
4030         rp->rbr_block_size = 1 << bss;
4031         rp->rbr_blocks_per_page = 1 << (PAGE_SHIFT-bss);
4032
4033         rp->rbr_sizes[0] = 256;
4034         rp->rbr_sizes[1] = 1024;
4035         if (np->dev->mtu > ETH_DATA_LEN) {
4036                 switch (PAGE_SIZE) {
4037                 case 4 * 1024:
4038                         rp->rbr_sizes[2] = 4096;
4039                         break;
4040
4041                 default:
4042                         rp->rbr_sizes[2] = 8192;
4043                         break;
4044                 }
4045         } else {
4046                 rp->rbr_sizes[2] = 2048;
4047         }
4048         rp->rbr_sizes[3] = rp->rbr_block_size;
4049 }
4050
4051 static int niu_alloc_channels(struct niu *np)
4052 {
4053         struct niu_parent *parent = np->parent;
4054         int first_rx_channel, first_tx_channel;
4055         int i, port, err;
4056
4057         port = np->port;
4058         first_rx_channel = first_tx_channel = 0;
4059         for (i = 0; i < port; i++) {
4060                 first_rx_channel += parent->rxchan_per_port[i];
4061                 first_tx_channel += parent->txchan_per_port[i];
4062         }
4063
4064         np->num_rx_rings = parent->rxchan_per_port[port];
4065         np->num_tx_rings = parent->txchan_per_port[port];
4066
4067         np->dev->real_num_tx_queues = np->num_tx_rings;
4068
4069         np->rx_rings = kzalloc(np->num_rx_rings * sizeof(struct rx_ring_info),
4070                                GFP_KERNEL);
4071         err = -ENOMEM;
4072         if (!np->rx_rings)
4073                 goto out_err;
4074
4075         for (i = 0; i < np->num_rx_rings; i++) {
4076                 struct rx_ring_info *rp = &np->rx_rings[i];
4077
4078                 rp->np = np;
4079                 rp->rx_channel = first_rx_channel + i;
4080
4081                 err = niu_alloc_rx_ring_info(np, rp);
4082                 if (err)
4083                         goto out_err;
4084
4085                 niu_size_rbr(np, rp);
4086
4087                 /* XXX better defaults, configurable, etc... XXX */
4088                 rp->nonsyn_window = 64;
4089                 rp->nonsyn_threshold = rp->rcr_table_size - 64;
4090                 rp->syn_window = 64;
4091                 rp->syn_threshold = rp->rcr_table_size - 64;
4092                 rp->rcr_pkt_threshold = 16;
4093                 rp->rcr_timeout = 8;
4094                 rp->rbr_kick_thresh = RBR_REFILL_MIN;
4095                 if (rp->rbr_kick_thresh < rp->rbr_blocks_per_page)
4096                         rp->rbr_kick_thresh = rp->rbr_blocks_per_page;
4097
4098                 err = niu_rbr_fill(np, rp, GFP_KERNEL);
4099                 if (err)
4100                         return err;
4101         }
4102
4103         np->tx_rings = kzalloc(np->num_tx_rings * sizeof(struct tx_ring_info),
4104                                GFP_KERNEL);
4105         err = -ENOMEM;
4106         if (!np->tx_rings)
4107                 goto out_err;
4108
4109         for (i = 0; i < np->num_tx_rings; i++) {
4110                 struct tx_ring_info *rp = &np->tx_rings[i];
4111
4112                 rp->np = np;
4113                 rp->tx_channel = first_tx_channel + i;
4114
4115                 err = niu_alloc_tx_ring_info(np, rp);
4116                 if (err)
4117                         goto out_err;
4118         }
4119
4120         return 0;
4121
4122 out_err:
4123         niu_free_channels(np);
4124         return err;
4125 }
4126
4127 static int niu_tx_cs_sng_poll(struct niu *np, int channel)
4128 {
4129         int limit = 1000;
4130
4131         while (--limit > 0) {
4132                 u64 val = nr64(TX_CS(channel));
4133                 if (val & TX_CS_SNG_STATE)
4134                         return 0;
4135         }
4136         return -ENODEV;
4137 }
4138
4139 static int niu_tx_channel_stop(struct niu *np, int channel)
4140 {
4141         u64 val = nr64(TX_CS(channel));
4142
4143         val |= TX_CS_STOP_N_GO;
4144         nw64(TX_CS(channel), val);
4145
4146         return niu_tx_cs_sng_poll(np, channel);
4147 }
4148
4149 static int niu_tx_cs_reset_poll(struct niu *np, int channel)
4150 {
4151         int limit = 1000;
4152
4153         while (--limit > 0) {
4154                 u64 val = nr64(TX_CS(channel));
4155                 if (!(val & TX_CS_RST))
4156                         return 0;
4157         }
4158         return -ENODEV;
4159 }
4160
4161 static int niu_tx_channel_reset(struct niu *np, int channel)
4162 {
4163         u64 val = nr64(TX_CS(channel));
4164         int err;
4165
4166         val |= TX_CS_RST;
4167         nw64(TX_CS(channel), val);
4168
4169         err = niu_tx_cs_reset_poll(np, channel);
4170         if (!err)
4171                 nw64(TX_RING_KICK(channel), 0);
4172
4173         return err;
4174 }
4175
4176 static int niu_tx_channel_lpage_init(struct niu *np, int channel)
4177 {
4178         u64 val;
4179
4180         nw64(TX_LOG_MASK1(channel), 0);
4181         nw64(TX_LOG_VAL1(channel), 0);
4182         nw64(TX_LOG_MASK2(channel), 0);
4183         nw64(TX_LOG_VAL2(channel), 0);
4184         nw64(TX_LOG_PAGE_RELO1(channel), 0);
4185         nw64(TX_LOG_PAGE_RELO2(channel), 0);
4186         nw64(TX_LOG_PAGE_HDL(channel), 0);
4187
4188         val  = (u64)np->port << TX_LOG_PAGE_VLD_FUNC_SHIFT;
4189         val |= (TX_LOG_PAGE_VLD_PAGE0 | TX_LOG_PAGE_VLD_PAGE1);
4190         nw64(TX_LOG_PAGE_VLD(channel), val);
4191
4192         /* XXX TXDMA 32bit mode? XXX */
4193
4194         return 0;
4195 }
4196
/* Enable or disable this port's bit in the shared TXC_CONTROL
 * register.  The register is shared between ports, hence the parent
 * lock.  The global ENABLE bit is set when any port enables, and is
 * dropped again only once the last per-port bit has been cleared.
 */
static void niu_txc_enable_port(struct niu *np, int on)
{
	unsigned long flags;
	u64 val, mask;

	niu_lock_parent(np, flags);
	val = nr64(TXC_CONTROL);
	mask = (u64)1 << np->port;
	if (on) {
		val |= TXC_CONTROL_ENABLE | mask;
	} else {
		val &= ~mask;
		/* No per-port bits left: drop the global enable too. */
		if ((val & ~TXC_CONTROL_ENABLE) == 0)
			val &= ~TXC_CONTROL_ENABLE;
	}
	nw64(TXC_CONTROL, val);
	niu_unlock_parent(np, flags);
}
4215
4216 static void niu_txc_set_imask(struct niu *np, u64 imask)
4217 {
4218         unsigned long flags;
4219         u64 val;
4220
4221         niu_lock_parent(np, flags);
4222         val = nr64(TXC_INT_MASK);
4223         val &= ~TXC_INT_MASK_VAL(np->port);
4224         val |= (imask << TXC_INT_MASK_VAL_SHIFT(np->port));
4225         niu_unlock_parent(np, flags);
4226 }
4227
4228 static void niu_txc_port_dma_enable(struct niu *np, int on)
4229 {
4230         u64 val = 0;
4231
4232         if (on) {
4233                 int i;
4234
4235                 for (i = 0; i < np->num_tx_rings; i++)
4236                         val |= (1 << np->tx_rings[i].tx_channel);
4237         }
4238         nw64(TXC_PORT_DMA(np->port), val);
4239 }
4240
4241 static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
4242 {
4243         int err, channel = rp->tx_channel;
4244         u64 val, ring_len;
4245
4246         err = niu_tx_channel_stop(np, channel);
4247         if (err)
4248                 return err;
4249
4250         err = niu_tx_channel_reset(np, channel);
4251         if (err)
4252                 return err;
4253
4254         err = niu_tx_channel_lpage_init(np, channel);
4255         if (err)
4256                 return err;
4257
4258         nw64(TXC_DMA_MAX(channel), rp->max_burst);
4259         nw64(TX_ENT_MSK(channel), 0);
4260
4261         if (rp->descr_dma & ~(TX_RNG_CFIG_STADDR_BASE |
4262                               TX_RNG_CFIG_STADDR)) {
4263                 dev_err(np->device, PFX "%s: TX ring channel %d "
4264                         "DMA addr (%llx) is not aligned.\n",
4265                         np->dev->name, channel,
4266                         (unsigned long long) rp->descr_dma);
4267                 return -EINVAL;
4268         }
4269
4270         /* The length field in TX_RNG_CFIG is measured in 64-byte
4271          * blocks.  rp->pending is the number of TX descriptors in
4272          * our ring, 8 bytes each, thus we divide by 8 bytes more
4273          * to get the proper value the chip wants.
4274          */
4275         ring_len = (rp->pending / 8);
4276
4277         val = ((ring_len << TX_RNG_CFIG_LEN_SHIFT) |
4278                rp->descr_dma);
4279         nw64(TX_RNG_CFIG(channel), val);
4280
4281         if (((rp->mbox_dma >> 32) & ~TXDMA_MBH_MBADDR) ||
4282             ((u32)rp->mbox_dma & ~TXDMA_MBL_MBADDR)) {
4283                 dev_err(np->device, PFX "%s: TX ring channel %d "
4284                         "MBOX addr (%llx) is has illegal bits.\n",
4285                         np->dev->name, channel,
4286                         (unsigned long long) rp->mbox_dma);
4287                 return -EINVAL;
4288         }
4289         nw64(TXDMA_MBH(channel), rp->mbox_dma >> 32);
4290         nw64(TXDMA_MBL(channel), rp->mbox_dma & TXDMA_MBL_MBADDR);
4291
4292         nw64(TX_CS(channel), 0);
4293
4294         rp->last_pkt_cnt = 0;
4295
4296         return 0;
4297 }
4298
4299 static void niu_init_rdc_groups(struct niu *np)
4300 {
4301         struct niu_rdc_tables *tp = &np->parent->rdc_group_cfg[np->port];
4302         int i, first_table_num = tp->first_table_num;
4303
4304         for (i = 0; i < tp->num_tables; i++) {
4305                 struct rdc_table *tbl = &tp->tables[i];
4306                 int this_table = first_table_num + i;
4307                 int slot;
4308
4309                 for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++)
4310                         nw64(RDC_TBL(this_table, slot),
4311                              tbl->rxdma_channel[slot]);
4312         }
4313
4314         nw64(DEF_RDC(np->port), np->parent->rdc_default[np->port]);
4315 }
4316
4317 static void niu_init_drr_weight(struct niu *np)
4318 {
4319         int type = phy_decode(np->parent->port_phy, np->port);
4320         u64 val;
4321
4322         switch (type) {
4323         case PORT_TYPE_10G:
4324                 val = PT_DRR_WEIGHT_DEFAULT_10G;
4325                 break;
4326
4327         case PORT_TYPE_1G:
4328         default:
4329                 val = PT_DRR_WEIGHT_DEFAULT_1G;
4330                 break;
4331         }
4332         nw64(PT_DRR_WT(np->port), val);
4333 }
4334
4335 static int niu_init_hostinfo(struct niu *np)
4336 {
4337         struct niu_parent *parent = np->parent;
4338         struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
4339         int i, err, num_alt = niu_num_alt_addr(np);
4340         int first_rdc_table = tp->first_table_num;
4341
4342         err = niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
4343         if (err)
4344                 return err;
4345
4346         err = niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
4347         if (err)
4348                 return err;
4349
4350         for (i = 0; i < num_alt; i++) {
4351                 err = niu_set_alt_mac_rdc_table(np, i, first_rdc_table, 1);
4352                 if (err)
4353                         return err;
4354         }
4355
4356         return 0;
4357 }
4358
/* Reset one RXDMA channel via the generic set-and-wait-clear helper
 * (limit 1000, delay 10 — see niu_set_and_wait_clear for the exact
 * polling semantics).
 */
static int niu_rx_channel_reset(struct niu *np, int channel)
{
	return niu_set_and_wait_clear(np, RXDMA_CFIG1(channel),
				      RXDMA_CFIG1_RST, 1000, 10,
				      "RXDMA_CFIG1");
}
4365
4366 static int niu_rx_channel_lpage_init(struct niu *np, int channel)
4367 {
4368         u64 val;
4369
4370         nw64(RX_LOG_MASK1(channel), 0);
4371         nw64(RX_LOG_VAL1(channel), 0);
4372         nw64(RX_LOG_MASK2(channel), 0);
4373         nw64(RX_LOG_VAL2(channel), 0);
4374         nw64(RX_LOG_PAGE_RELO1(channel), 0);
4375         nw64(RX_LOG_PAGE_RELO2(channel), 0);
4376         nw64(RX_LOG_PAGE_HDL(channel), 0);
4377
4378         val  = (u64)np->port << RX_LOG_PAGE_VLD_FUNC_SHIFT;
4379         val |= (RX_LOG_PAGE_VLD_PAGE0 | RX_LOG_PAGE_VLD_PAGE1);
4380         nw64(RX_LOG_PAGE_VLD(channel), val);
4381
4382         return 0;
4383 }
4384
4385 static void niu_rx_channel_wred_init(struct niu *np, struct rx_ring_info *rp)
4386 {
4387         u64 val;
4388
4389         val = (((u64)rp->nonsyn_window << RDC_RED_PARA_WIN_SHIFT) |
4390                ((u64)rp->nonsyn_threshold << RDC_RED_PARA_THRE_SHIFT) |
4391                ((u64)rp->syn_window << RDC_RED_PARA_WIN_SYN_SHIFT) |
4392                ((u64)rp->syn_threshold << RDC_RED_PARA_THRE_SYN_SHIFT));
4393         nw64(RDC_RED_PARA(rp->rx_channel), val);
4394 }
4395
4396 static int niu_compute_rbr_cfig_b(struct rx_ring_info *rp, u64 *ret)
4397 {
4398         u64 val = 0;
4399
4400         switch (rp->rbr_block_size) {
4401         case 4 * 1024:
4402                 val |= (RBR_BLKSIZE_4K << RBR_CFIG_B_BLKSIZE_SHIFT);
4403                 break;
4404         case 8 * 1024:
4405                 val |= (RBR_BLKSIZE_8K << RBR_CFIG_B_BLKSIZE_SHIFT);
4406                 break;
4407         case 16 * 1024:
4408                 val |= (RBR_BLKSIZE_16K << RBR_CFIG_B_BLKSIZE_SHIFT);
4409                 break;
4410         case 32 * 1024:
4411                 val |= (RBR_BLKSIZE_32K << RBR_CFIG_B_BLKSIZE_SHIFT);
4412                 break;
4413         default:
4414                 return -EINVAL;
4415         }
4416         val |= RBR_CFIG_B_VLD2;
4417         switch (rp->rbr_sizes[2]) {
4418         case 2 * 1024:
4419                 val |= (RBR_BUFSZ2_2K << RBR_CFIG_B_BUFSZ2_SHIFT);
4420                 break;
4421         case 4 * 1024:
4422                 val |= (RBR_BUFSZ2_4K << RBR_CFIG_B_BUFSZ2_SHIFT);
4423                 break;
4424         case 8 * 1024:
4425                 val |= (RBR_BUFSZ2_8K << RBR_CFIG_B_BUFSZ2_SHIFT);
4426                 break;
4427         case 16 * 1024:
4428                 val |= (RBR_BUFSZ2_16K << RBR_CFIG_B_BUFSZ2_SHIFT);
4429                 break;
4430
4431         default:
4432                 return -EINVAL;
4433         }
4434         val |= RBR_CFIG_B_VLD1;
4435         switch (rp->rbr_sizes[1]) {
4436         case 1 * 1024:
4437                 val |= (RBR_BUFSZ1_1K << RBR_CFIG_B_BUFSZ1_SHIFT);
4438                 break;
4439         case 2 * 1024:
4440                 val |= (RBR_BUFSZ1_2K << RBR_CFIG_B_BUFSZ1_SHIFT);
4441                 break;
4442         case 4 * 1024:
4443                 val |= (RBR_BUFSZ1_4K << RBR_CFIG_B_BUFSZ1_SHIFT);
4444                 break;
4445         case 8 * 1024:
4446                 val |= (RBR_BUFSZ1_8K << RBR_CFIG_B_BUFSZ1_SHIFT);
4447                 break;
4448
4449         default:
4450                 return -EINVAL;
4451         }
4452         val |= RBR_CFIG_B_VLD0;
4453         switch (rp->rbr_sizes[0]) {
4454         case 256:
4455                 val |= (RBR_BUFSZ0_256 << RBR_CFIG_B_BUFSZ0_SHIFT);
4456                 break;
4457         case 512:
4458                 val |= (RBR_BUFSZ0_512 << RBR_CFIG_B_BUFSZ0_SHIFT);
4459                 break;
4460         case 1 * 1024:
4461                 val |= (RBR_BUFSZ0_1K << RBR_CFIG_B_BUFSZ0_SHIFT);
4462                 break;
4463         case 2 * 1024:
4464                 val |= (RBR_BUFSZ0_2K << RBR_CFIG_B_BUFSZ0_SHIFT);
4465                 break;
4466
4467         default:
4468                 return -EINVAL;
4469         }
4470
4471         *ret = val;
4472         return 0;
4473 }
4474
4475 static int niu_enable_rx_channel(struct niu *np, int channel, int on)
4476 {
4477         u64 val = nr64(RXDMA_CFIG1(channel));
4478         int limit;
4479
4480         if (on)
4481                 val |= RXDMA_CFIG1_EN;
4482         else
4483                 val &= ~RXDMA_CFIG1_EN;
4484         nw64(RXDMA_CFIG1(channel), val);
4485
4486         limit = 1000;
4487         while (--limit > 0) {
4488                 if (nr64(RXDMA_CFIG1(channel)) & RXDMA_CFIG1_QST)
4489                         break;
4490                 udelay(10);
4491         }
4492         if (limit <= 0)
4493                 return -ENODEV;
4494         return 0;
4495 }
4496
/* Bring one RX DMA channel up: reset it, program logical pages and
 * WRED, set the event mask and status, program mailbox/RBR/RCR base
 * addresses and thresholds, enable the channel, then prime the RBR
 * kick register with the buffers already posted.
 */
static int niu_init_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
{
	int err, channel = rp->rx_channel;
	u64 val;

	err = niu_rx_channel_reset(np, channel);
	if (err)
		return err;

	err = niu_rx_channel_lpage_init(np, channel);
	if (err)
		return err;

	niu_rx_channel_wred_init(np, rp);

	nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_RBR_EMPTY);
	nw64(RX_DMA_CTL_STAT(channel),
	     (RX_DMA_CTL_STAT_MEX |
	      RX_DMA_CTL_STAT_RCRTHRES |
	      RX_DMA_CTL_STAT_RCRTO |
	      RX_DMA_CTL_STAT_RBR_EMPTY));
	/* The mailbox DMA address is split across the two CFIG regs. */
	nw64(RXDMA_CFIG1(channel), rp->mbox_dma >> 32);
	nw64(RXDMA_CFIG2(channel), (rp->mbox_dma & 0x00000000ffffffc0));
	nw64(RBR_CFIG_A(channel),
	     ((u64)rp->rbr_table_size << RBR_CFIG_A_LEN_SHIFT) |
	     (rp->rbr_dma & (RBR_CFIG_A_STADDR_BASE | RBR_CFIG_A_STADDR)));
	err = niu_compute_rbr_cfig_b(rp, &val);
	if (err)
		return err;
	nw64(RBR_CFIG_B(channel), val);
	nw64(RCRCFIG_A(channel),
	     ((u64)rp->rcr_table_size << RCRCFIG_A_LEN_SHIFT) |
	     (rp->rcr_dma & (RCRCFIG_A_STADDR_BASE | RCRCFIG_A_STADDR)));
	nw64(RCRCFIG_B(channel),
	     ((u64)rp->rcr_pkt_threshold << RCRCFIG_B_PTHRES_SHIFT) |
	     RCRCFIG_B_ENTOUT |
	     ((u64)rp->rcr_timeout << RCRCFIG_B_TIMEOUT_SHIFT));

	err = niu_enable_rx_channel(np, channel, 1);
	if (err)
		return err;

	/* Tell the chip about the buffers already posted to the RBR. */
	nw64(RBR_KICK(channel), rp->rbr_index);

	val = nr64(RX_DMA_CTL_STAT(channel));
	val |= RX_DMA_CTL_STAT_RBR_EMPTY;
	nw64(RX_DMA_CTL_STAT(channel), val);

	return 0;
}
4547
/* Initialize the RX DMA side of the chip: program the shared clock
 * divider and WRED random seed (under the parent lock, since those
 * registers are shared across ports), set up RDC groups, DRR weights
 * and host-info entries, then bring up every RX ring.
 *
 * Returns 0 on success or the first failing step's negative errno.
 */
static int niu_init_rx_channels(struct niu *np)
{
	unsigned long flags;
	u64 seed = jiffies_64;	/* jiffies as a cheap RNG seed for RED */
	int err, i;

	niu_lock_parent(np, flags);
	nw64(RX_DMA_CK_DIV, np->parent->rxdma_clock_divider);
	nw64(RED_RAN_INIT, RED_RAN_INIT_OPMODE | (seed & RED_RAN_INIT_VAL));
	niu_unlock_parent(np, flags);

	/* XXX RXDMA 32bit mode? XXX */

	niu_init_rdc_groups(np);
	niu_init_drr_weight(np);

	err = niu_init_hostinfo(np);
	if (err)
		return err;

	for (i = 0; i < np->num_rx_rings; i++) {
		struct rx_ring_info *rp = &np->rx_rings[i];

		err = niu_init_one_rx_channel(np, rp);
		if (err)
			return err;
	}

	return 0;
}
4578
/* Install a TCAM classification rule matching IP packets that carry no
 * port information (e.g. fragments), so they still classify sensibly.
 * Writes both the key/mask pair and the associated-data word for the
 * entry at the classifier's current tcam_index.
 *
 * Returns 0 on success or a negative errno from the TCAM writes.
 */
static int niu_set_ip_frag_rule(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	struct niu_classifier *cp = &np->clas;
	struct niu_tcam_entry *tp;
	int index, err;

	/* XXX fix this allocation scheme XXX */
	index = cp->tcam_index;
	tp = &parent->tcam[index];

	/* Note that the noport bit is the same in both ipv4 and
	 * ipv6 format TCAM entries.
	 */
	memset(tp, 0, sizeof(*tp));
	tp->key[1] = TCAM_V4KEY1_NOPORT;
	tp->key_mask[1] = TCAM_V4KEY1_NOPORT;
	tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET |
			  ((u64)0 << TCAM_ASSOCDATA_OFFSET_SHIFT));
	err = tcam_write(np, index, tp->key, tp->key_mask);
	if (err)
		return err;
	err = tcam_assoc_write(np, index, tp->assoc_data);
	if (err)
		return err;

	return 0;
}
4607
/* Program the packet classifier: hash polynomials, host-info entries,
 * the VLAN and alternate-MAC to RDC-table mappings, per-class TCAM and
 * flow keys, the IP-fragment rule, and finally enable TCAM lookups.
 *
 * Returns 0 on success or the first failing step's negative errno.
 */
static int niu_init_classifier_hw(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	struct niu_classifier *cp = &np->clas;
	int i, err;

	nw64(H1POLY, cp->h1_init);
	nw64(H2POLY, cp->h2_init);

	err = niu_init_hostinfo(np);
	if (err)
		return err;

	for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++) {
		struct niu_vlan_rdc *vp = &cp->vlan_mappings[i];

		vlan_tbl_write(np, i, np->port,
			       vp->vlan_pref, vp->rdc_num);
	}

	for (i = 0; i < cp->num_alt_mac_mappings; i++) {
		struct niu_altmac_rdc *ap = &cp->alt_mac_mappings[i];

		err = niu_set_alt_mac_rdc_table(np, ap->alt_mac_num,
						ap->rdc_num, ap->mac_pref);
		if (err)
			return err;
	}

	/* Walk every hardware class code and install its TCAM/flow keys. */
	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) {
		int index = i - CLASS_CODE_USER_PROG1;

		err = niu_set_tcam_key(np, i, parent->tcam_key[index]);
		if (err)
			return err;
		err = niu_set_flow_key(np, i, parent->flow_key[index]);
		if (err)
			return err;
	}

	err = niu_set_ip_frag_rule(np);
	if (err)
		return err;

	tcam_enable(np, 1);

	return 0;
}
4656
/* Write one 5-word (5 x 64-bit) entry into this port's ZCP CFIFO RAM
 * and wait for the access to complete.
 *
 * NOTE(review): the 'index' argument is currently unused -- the ZFCID
 * field is always written as 0.  Confirm against the ZCP programming
 * model whether per-entry addressing is required here.
 *
 * Returns 0 on success, negative errno if the busy bit never clears.
 */
static int niu_zcp_write(struct niu *np, int index, u64 *data)
{
	nw64(ZCP_RAM_DATA0, data[0]);
	nw64(ZCP_RAM_DATA1, data[1]);
	nw64(ZCP_RAM_DATA2, data[2]);
	nw64(ZCP_RAM_DATA3, data[3]);
	nw64(ZCP_RAM_DATA4, data[4]);
	nw64(ZCP_RAM_BE, ZCP_RAM_BE_VAL);
	nw64(ZCP_RAM_ACC,
	     (ZCP_RAM_ACC_WRITE |
	      (0 << ZCP_RAM_ACC_ZFCID_SHIFT) |
	      (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));

	return niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
				   1000, 100);
}
4673
/* Read one 5-word entry from this port's ZCP CFIFO RAM into data[].
 * Waits for any in-flight access to finish before issuing the read,
 * then waits again for the read itself to complete.
 *
 * NOTE(review): like niu_zcp_write(), the 'index' argument is unused
 * (ZFCID is always 0) -- confirm against the ZCP programming model.
 *
 * Returns 0 on success, negative errno if either busy-wait times out.
 */
static int niu_zcp_read(struct niu *np, int index, u64 *data)
{
	int err;

	err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
				  1000, 100);
	if (err) {
		dev_err(np->device, PFX "%s: ZCP read busy won't clear, "
			"ZCP_RAM_ACC[%llx]\n", np->dev->name,
			(unsigned long long) nr64(ZCP_RAM_ACC));
		return err;
	}

	nw64(ZCP_RAM_ACC,
	     (ZCP_RAM_ACC_READ |
	      (0 << ZCP_RAM_ACC_ZFCID_SHIFT) |
	      (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));

	err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
				  1000, 100);
	if (err) {
		dev_err(np->device, PFX "%s: ZCP read busy2 won't clear, "
			"ZCP_RAM_ACC[%llx]\n", np->dev->name,
			(unsigned long long) nr64(ZCP_RAM_ACC));
		return err;
	}

	data[0] = nr64(ZCP_RAM_DATA0);
	data[1] = nr64(ZCP_RAM_DATA1);
	data[2] = nr64(ZCP_RAM_DATA2);
	data[3] = nr64(ZCP_RAM_DATA3);
	data[4] = nr64(ZCP_RAM_DATA4);

	return 0;
}
4709
4710 static void niu_zcp_cfifo_reset(struct niu *np)
4711 {
4712         u64 val = nr64(RESET_CFIFO);
4713
4714         val |= RESET_CFIFO_RST(np->port);
4715         nw64(RESET_CFIFO, val);
4716         udelay(10);
4717
4718         val &= ~RESET_CFIFO_RST(np->port);
4719         nw64(RESET_CFIFO, val);
4720 }
4721
4722 static int niu_init_zcp(struct niu *np)
4723 {
4724         u64 data[5], rbuf[5];
4725         int i, max, err;
4726
4727         if (np->parent->plat_type != PLAT_TYPE_NIU) {
4728                 if (np->port == 0 || np->port == 1)
4729                         max = ATLAS_P0_P1_CFIFO_ENTRIES;
4730                 else
4731                         max = ATLAS_P2_P3_CFIFO_ENTRIES;
4732         } else
4733                 max = NIU_CFIFO_ENTRIES;
4734
4735         data[0] = 0;
4736         data[1] = 0;
4737         data[2] = 0;
4738         data[3] = 0;
4739         data[4] = 0;
4740
4741         for (i = 0; i < max; i++) {
4742                 err = niu_zcp_write(np, i, data);
4743                 if (err)
4744                         return err;
4745                 err = niu_zcp_read(np, i, rbuf);
4746                 if (err)
4747                         return err;
4748         }
4749
4750         niu_zcp_cfifo_reset(np);
4751         nw64(CFIFO_ECC(np->port), 0);
4752         nw64(ZCP_INT_STAT, ZCP_INT_STAT_ALL);
4753         (void) nr64(ZCP_INT_STAT);
4754         nw64(ZCP_INT_MASK, ZCP_INT_MASK_ALL);
4755
4756         return 0;
4757 }
4758
/* Write one 5-word entry into the IPP DFIFO at 'index'.  PIO write
 * access to the DFIFO is enabled in IPP_CFIG around the writes and the
 * bit is cleared again afterwards.
 */
static void niu_ipp_write(struct niu *np, int index, u64 *data)
{
	u64 val = nr64_ipp(IPP_CFIG);

	nw64_ipp(IPP_CFIG, val | IPP_CFIG_DFIFO_PIO_W);
	nw64_ipp(IPP_DFIFO_WR_PTR, index);
	nw64_ipp(IPP_DFIFO_WR0, data[0]);
	nw64_ipp(IPP_DFIFO_WR1, data[1]);
	nw64_ipp(IPP_DFIFO_WR2, data[2]);
	nw64_ipp(IPP_DFIFO_WR3, data[3]);
	nw64_ipp(IPP_DFIFO_WR4, data[4]);
	nw64_ipp(IPP_CFIG, val & ~IPP_CFIG_DFIFO_PIO_W);
}
4772
/* Read one 5-word entry from the IPP DFIFO at 'index' into data[].
 * The read pointer must be programmed before the data registers are
 * read; keep that ordering.
 */
static void niu_ipp_read(struct niu *np, int index, u64 *data)
{
	nw64_ipp(IPP_DFIFO_RD_PTR, index);
	data[0] = nr64_ipp(IPP_DFIFO_RD0);
	data[1] = nr64_ipp(IPP_DFIFO_RD1);
	data[2] = nr64_ipp(IPP_DFIFO_RD2);
	data[3] = nr64_ipp(IPP_DFIFO_RD3);
	data[4] = nr64_ipp(IPP_DFIFO_RD4);
}
4782
/* Soft-reset the IPP block, waiting (1000 polls, 100us apart) for the
 * reset bit to self-clear.  Returns 0 on success, negative on timeout.
 */
static int niu_ipp_reset(struct niu *np)
{
	return niu_set_and_wait_clear_ipp(np, IPP_CFIG, IPP_CFIG_SOFT_RST,
					  1000, 100, "IPP_CFIG");
}
4788
4789 static int niu_init_ipp(struct niu *np)
4790 {
4791         u64 data[5], rbuf[5], val;
4792         int i, max, err;
4793
4794         if (np->parent->plat_type != PLAT_TYPE_NIU) {
4795                 if (np->port == 0 || np->port == 1)
4796                         max = ATLAS_P0_P1_DFIFO_ENTRIES;
4797                 else
4798                         max = ATLAS_P2_P3_DFIFO_ENTRIES;
4799         } else
4800                 max = NIU_DFIFO_ENTRIES;
4801
4802         data[0] = 0;
4803         data[1] = 0;
4804         data[2] = 0;
4805         data[3] = 0;
4806         data[4] = 0;
4807
4808         for (i = 0; i < max; i++) {
4809                 niu_ipp_write(np, i, data);
4810                 niu_ipp_read(np, i, rbuf);
4811         }
4812
4813         (void) nr64_ipp(IPP_INT_STAT);
4814         (void) nr64_ipp(IPP_INT_STAT);
4815
4816         err = niu_ipp_reset(np);
4817         if (err)
4818                 return err;
4819
4820         (void) nr64_ipp(IPP_PKT_DIS);
4821         (void) nr64_ipp(IPP_BAD_CS_CNT);
4822         (void) nr64_ipp(IPP_ECC);
4823
4824         (void) nr64_ipp(IPP_INT_STAT);
4825
4826         nw64_ipp(IPP_MSK, ~IPP_MSK_ALL);
4827
4828         val = nr64_ipp(IPP_CFIG);
4829         val &= ~IPP_CFIG_IP_MAX_PKT;
4830         val |= (IPP_CFIG_IPP_ENABLE |
4831                 IPP_CFIG_DFIFO_ECC_EN |
4832                 IPP_CFIG_DROP_BAD_CRC |
4833                 IPP_CFIG_CKSUM_EN |
4834                 (0x1ffff << IPP_CFIG_IP_MAX_PKT_SHIFT));
4835         nw64_ipp(IPP_CFIG, val);
4836
4837         return 0;
4838 }
4839
4840 static void niu_handle_led(struct niu *np, int status)
4841 {
4842         u64 val;
4843         val = nr64_mac(XMAC_CONFIG);
4844
4845         if ((np->flags & NIU_FLAGS_10G) != 0 &&
4846             (np->flags & NIU_FLAGS_FIBER) != 0) {
4847                 if (status) {
4848                         val |= XMAC_CONFIG_LED_POLARITY;
4849                         val &= ~XMAC_CONFIG_FORCE_LED_ON;
4850                 } else {
4851                         val |= XMAC_CONFIG_FORCE_LED_ON;
4852                         val &= ~XMAC_CONFIG_LED_POLARITY;
4853                 }
4854         }
4855
4856         nw64_mac(XMAC_CONFIG, val);
4857 }
4858
/* Program the XMAC's XIF (transceiver interface) configuration from
 * the current link settings: loopback, LFS, PCS bypass, clock select
 * and the XGMII/GMII/MII interface mode.  The mode field is written in
 * a second pass after the first XMAC_CONFIG write has taken effect.
 */
static void niu_init_xif_xmac(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u64 val;

	if (np->flags & NIU_FLAGS_XCVR_SERDES) {
		/* ATCA 1G SERDES needs the MIF in ATCA-GE mode. */
		val = nr64(MIF_CONFIG);
		val |= MIF_CONFIG_ATCA_GE;
		nw64(MIF_CONFIG, val);
	}

	val = nr64_mac(XMAC_CONFIG);
	val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;

	val |= XMAC_CONFIG_TX_OUTPUT_EN;

	if (lp->loopback_mode == LOOPBACK_MAC) {
		val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;
		val |= XMAC_CONFIG_LOOPBACK;
	} else {
		val &= ~XMAC_CONFIG_LOOPBACK;
	}

	if (np->flags & NIU_FLAGS_10G) {
		val &= ~XMAC_CONFIG_LFS_DISABLE;
	} else {
		/* No link-fault signalling below 10G; bypass the 1G PCS
		 * only for copper (non-fiber, non-serdes) ports.
		 */
		val |= XMAC_CONFIG_LFS_DISABLE;
		if (!(np->flags & NIU_FLAGS_FIBER) &&
		    !(np->flags & NIU_FLAGS_XCVR_SERDES))
			val |= XMAC_CONFIG_1G_PCS_BYPASS;
		else
			val &= ~XMAC_CONFIG_1G_PCS_BYPASS;
	}

	val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;

	if (lp->active_speed == SPEED_100)
		val |= XMAC_CONFIG_SEL_CLK_25MHZ;
	else
		val &= ~XMAC_CONFIG_SEL_CLK_25MHZ;

	nw64_mac(XMAC_CONFIG, val);

	/* Second pass: set the interface mode field. */
	val = nr64_mac(XMAC_CONFIG);
	val &= ~XMAC_CONFIG_MODE_MASK;
	if (np->flags & NIU_FLAGS_10G) {
		val |= XMAC_CONFIG_MODE_XGMII;
	} else {
		if (lp->active_speed == SPEED_100)
			val |= XMAC_CONFIG_MODE_MII;
		else
			val |= XMAC_CONFIG_MODE_GMII;
	}

	nw64_mac(XMAC_CONFIG, val);
}
4915
4916 static void niu_init_xif_bmac(struct niu *np)
4917 {
4918         struct niu_link_config *lp = &np->link_config;
4919         u64 val;
4920
4921         val = BMAC_XIF_CONFIG_TX_OUTPUT_EN;
4922
4923         if (lp->loopback_mode == LOOPBACK_MAC)
4924                 val |= BMAC_XIF_CONFIG_MII_LOOPBACK;
4925         else
4926                 val &= ~BMAC_XIF_CONFIG_MII_LOOPBACK;
4927
4928         if (lp->active_speed == SPEED_1000)
4929                 val |= BMAC_XIF_CONFIG_GMII_MODE;
4930         else
4931                 val &= ~BMAC_XIF_CONFIG_GMII_MODE;
4932
4933         val &= ~(BMAC_XIF_CONFIG_LINK_LED |
4934                  BMAC_XIF_CONFIG_LED_POLARITY);
4935
4936         if (!(np->flags & NIU_FLAGS_10G) &&
4937             !(np->flags & NIU_FLAGS_FIBER) &&
4938             lp->active_speed == SPEED_100)
4939                 val |= BMAC_XIF_CONFIG_25MHZ_CLOCK;
4940         else
4941                 val &= ~BMAC_XIF_CONFIG_25MHZ_CLOCK;
4942
4943         nw64_mac(BMAC_XIF_CONFIG, val);
4944 }
4945
4946 static void niu_init_xif(struct niu *np)
4947 {
4948         if (np->flags & NIU_FLAGS_XMAC)
4949                 niu_init_xif_xmac(np);
4950         else
4951                 niu_init_xif_bmac(np);
4952 }
4953
4954 static void niu_pcs_mii_reset(struct niu *np)
4955 {
4956         int limit = 1000;
4957         u64 val = nr64_pcs(PCS_MII_CTL);
4958         val |= PCS_MII_CTL_RST;
4959         nw64_pcs(PCS_MII_CTL, val);
4960         while ((--limit >= 0) && (val & PCS_MII_CTL_RST)) {
4961                 udelay(100);
4962                 val = nr64_pcs(PCS_MII_CTL);
4963         }
4964 }
4965
4966 static void niu_xpcs_reset(struct niu *np)
4967 {
4968         int limit = 1000;
4969         u64 val = nr64_xpcs(XPCS_CONTROL1);
4970         val |= XPCS_CONTROL1_RESET;
4971         nw64_xpcs(XPCS_CONTROL1, val);
4972         while ((--limit >= 0) && (val & XPCS_CONTROL1_RESET)) {
4973                 udelay(100);
4974                 val = nr64_xpcs(XPCS_CONTROL1);
4975         }
4976 }
4977
/* Initialize the PCS layer according to the port type, selected by the
 * combination of the 10G/FIBER/XCVR_SERDES flags: 1G fiber and 1G
 * SERDES use the MII PCS, 10G variants use the XPCS, and 1G copper /
 * RGMII fiber bypass the PCS datapath into MII mode.
 *
 * Returns 0 on success, -EINVAL for an unsupported flag combination
 * (including 10G on a non-XMAC port).
 */
static int niu_init_pcs(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u64 val;

	switch (np->flags & (NIU_FLAGS_10G |
			     NIU_FLAGS_FIBER |
			     NIU_FLAGS_XCVR_SERDES)) {
	case NIU_FLAGS_FIBER:
		/* 1G fiber */
		nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
		nw64_pcs(PCS_DPATH_MODE, 0);
		niu_pcs_mii_reset(np);
		break;

	case NIU_FLAGS_10G:
	case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
	case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
		/* 10G SERDES */
		if (!(np->flags & NIU_FLAGS_XMAC))
			return -EINVAL;

		/* 10G copper or fiber */
		val = nr64_mac(XMAC_CONFIG);
		val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;
		nw64_mac(XMAC_CONFIG, val);

		niu_xpcs_reset(np);

		val = nr64_xpcs(XPCS_CONTROL1);
		if (lp->loopback_mode == LOOPBACK_PHY)
			val |= XPCS_CONTROL1_LOOPBACK;
		else
			val &= ~XPCS_CONTROL1_LOOPBACK;
		nw64_xpcs(XPCS_CONTROL1, val);

		/* Clear deskew and symbol-error counters (reads clear). */
		nw64_xpcs(XPCS_DESKEW_ERR_CNT, 0);
		(void) nr64_xpcs(XPCS_SYMERR_CNT01);
		(void) nr64_xpcs(XPCS_SYMERR_CNT23);
		break;


	case NIU_FLAGS_XCVR_SERDES:
		/* 1G SERDES */
		niu_pcs_mii_reset(np);
		nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
		nw64_pcs(PCS_DPATH_MODE, 0);
		break;

	case 0:
		/* 1G copper */
	case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
		/* 1G RGMII FIBER */
		nw64_pcs(PCS_DPATH_MODE, PCS_DPATH_MODE_MII);
		niu_pcs_mii_reset(np);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
5041
/* Reset the XMAC TX side (both register state and soft reset bits),
 * polling up to 1000 * 100us for the bits to self-clear.
 * Returns 0 on success, negative errno on timeout.
 */
static int niu_reset_tx_xmac(struct niu *np)
{
	return niu_set_and_wait_clear_mac(np, XTXMAC_SW_RST,
					  (XTXMAC_SW_RST_REG_RS |
					   XTXMAC_SW_RST_SOFT_RST),
					  1000, 100, "XTXMAC_SW_RST");
}
5049
5050 static int niu_reset_tx_bmac(struct niu *np)
5051 {
5052         int limit;
5053
5054         nw64_mac(BTXMAC_SW_RST, BTXMAC_SW_RST_RESET);
5055         limit = 1000;
5056         while (--limit >= 0) {
5057                 if (!(nr64_mac(BTXMAC_SW_RST) & BTXMAC_SW_RST_RESET))
5058                         break;
5059                 udelay(100);
5060         }
5061         if (limit < 0) {
5062                 dev_err(np->device, PFX "Port %u TX BMAC would not reset, "
5063                         "BTXMAC_SW_RST[%llx]\n",
5064                         np->port,
5065                         (unsigned long long) nr64_mac(BTXMAC_SW_RST));
5066                 return -ENODEV;
5067         }
5068
5069         return 0;
5070 }
5071
5072 static int niu_reset_tx_mac(struct niu *np)
5073 {
5074         if (np->flags & NIU_FLAGS_XMAC)
5075                 return niu_reset_tx_xmac(np);
5076         else
5077                 return niu_reset_tx_bmac(np);
5078 }
5079
/* Program the XMAC TX side: min/max frame sizes, status interrupt
 * masks, inter-packet gap for the active interface type, a clean
 * XMAC_CONFIG TX state (TX left disabled here; enabled later by
 * niu_enable_tx_mac()), and zeroed TX counters.
 */
static void niu_init_tx_xmac(struct niu *np, u64 min, u64 max)
{
	u64 val;

	/* Same minimum applies to both RX and TX size checks. */
	val = nr64_mac(XMAC_MIN);
	val &= ~(XMAC_MIN_TX_MIN_PKT_SIZE |
		 XMAC_MIN_RX_MIN_PKT_SIZE);
	val |= (min << XMAC_MIN_RX_MIN_PKT_SIZE_SHFT);
	val |= (min << XMAC_MIN_TX_MIN_PKT_SIZE_SHFT);
	nw64_mac(XMAC_MIN, val);

	nw64_mac(XMAC_MAX, max);

	/* Mask all TX MAC status interrupts. */
	nw64_mac(XTXMAC_STAT_MSK, ~(u64)0);

	val = nr64_mac(XMAC_IPG);
	if (np->flags & NIU_FLAGS_10G) {
		val &= ~XMAC_IPG_IPG_XGMII;
		val |= (IPG_12_15_XGMII << XMAC_IPG_IPG_XGMII_SHIFT);
	} else {
		val &= ~XMAC_IPG_IPG_MII_GMII;
		val |= (IPG_12_MII_GMII << XMAC_IPG_IPG_MII_GMII_SHIFT);
	}
	nw64_mac(XMAC_IPG, val);

	val = nr64_mac(XMAC_CONFIG);
	val &= ~(XMAC_CONFIG_ALWAYS_NO_CRC |
		 XMAC_CONFIG_STRETCH_MODE |
		 XMAC_CONFIG_VAR_MIN_IPG_EN |
		 XMAC_CONFIG_TX_ENABLE);
	nw64_mac(XMAC_CONFIG, val);

	nw64_mac(TXMAC_FRM_CNT, 0);
	nw64_mac(TXMAC_BYTE_CNT, 0);
}
5115
/* Program the BMAC TX side: min/max frame sizes, status interrupt
 * mask, control-frame type and preamble length, and a clean BTXMAC
 * config (TX left disabled here; enabled later by niu_enable_tx_mac()).
 */
static void niu_init_tx_bmac(struct niu *np, u64 min, u64 max)
{
	u64 val;

	nw64_mac(BMAC_MIN_FRAME, min);
	nw64_mac(BMAC_MAX_FRAME, max);

	nw64_mac(BTXMAC_STATUS_MASK, ~(u64)0);
	/* 0x8808 -- presumably the MAC Control (PAUSE) EtherType;
	 * confirm against the BMAC documentation.
	 */
	nw64_mac(BMAC_CTRL_TYPE, 0x8808);
	nw64_mac(BMAC_PREAMBLE_SIZE, 7);

	val = nr64_mac(BTXMAC_CONFIG);
	val &= ~(BTXMAC_CONFIG_FCS_DISABLE |
		 BTXMAC_CONFIG_ENABLE);
	nw64_mac(BTXMAC_CONFIG, val);
}
5132
5133 static void niu_init_tx_mac(struct niu *np)
5134 {
5135         u64 min, max;
5136
5137         min = 64;
5138         if (np->dev->mtu > ETH_DATA_LEN)
5139                 max = 9216;
5140         else
5141                 max = 1522;
5142
5143         /* The XMAC_MIN register only accepts values for TX min which
5144          * have the low 3 bits cleared.
5145          */
5146         BUILD_BUG_ON(min & 0x7);
5147
5148         if (np->flags & NIU_FLAGS_XMAC)
5149                 niu_init_tx_xmac(np, min, max);
5150         else
5151                 niu_init_tx_bmac(np, min, max);
5152 }
5153
5154 static int niu_reset_rx_xmac(struct niu *np)
5155 {
5156         int limit;
5157
5158         nw64_mac(XRXMAC_SW_RST,
5159                  XRXMAC_SW_RST_REG_RS | XRXMAC_SW_RST_SOFT_RST);
5160         limit = 1000;
5161         while (--limit >= 0) {
5162                 if (!(nr64_mac(XRXMAC_SW_RST) & (XRXMAC_SW_RST_REG_RS |
5163                                                  XRXMAC_SW_RST_SOFT_RST)))
5164                     break;
5165                 udelay(100);
5166         }
5167         if (limit < 0) {
5168                 dev_err(np->device, PFX "Port %u RX XMAC would not reset, "
5169                         "XRXMAC_SW_RST[%llx]\n",
5170                         np->port,
5171                         (unsigned long long) nr64_mac(XRXMAC_SW_RST));
5172                 return -ENODEV;
5173         }
5174
5175         return 0;
5176 }
5177
5178 static int niu_reset_rx_bmac(struct niu *np)
5179 {
5180         int limit;
5181
5182         nw64_mac(BRXMAC_SW_RST, BRXMAC_SW_RST_RESET);
5183         limit = 1000;
5184         while (--limit >= 0) {
5185                 if (!(nr64_mac(BRXMAC_SW_RST) & BRXMAC_SW_RST_RESET))
5186                         break;
5187                 udelay(100);
5188         }
5189         if (limit < 0) {
5190                 dev_err(np->device, PFX "Port %u RX BMAC would not reset, "
5191                         "BRXMAC_SW_RST[%llx]\n",
5192                         np->port,
5193                         (unsigned long long) nr64_mac(BRXMAC_SW_RST));
5194                 return -ENODEV;
5195         }
5196
5197         return 0;
5198 }
5199
5200 static int niu_reset_rx_mac(struct niu *np)
5201 {
5202         if (np->flags & NIU_FLAGS_XMAC)
5203                 return niu_reset_rx_xmac(np);
5204         else
5205                 return niu_reset_rx_bmac(np);
5206 }
5207
/* Program the XMAC RX side: clear the address filters and hash table,
 * mask status interrupts, point the primary and multicast MACs at this
 * port's first RDC table, set a clean XMAC_CONFIG RX state (RX left
 * disabled here; enabled later by niu_enable_rx_mac()) with only hash
 * filtering on, and zero all RX counters.
 */
static void niu_init_rx_xmac(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
	int first_rdc_table = tp->first_table_num;
	unsigned long i;
	u64 val;

	nw64_mac(XMAC_ADD_FILT0, 0);
	nw64_mac(XMAC_ADD_FILT1, 0);
	nw64_mac(XMAC_ADD_FILT2, 0);
	nw64_mac(XMAC_ADD_FILT12_MASK, 0);
	nw64_mac(XMAC_ADD_FILT00_MASK, 0);
	for (i = 0; i < MAC_NUM_HASH; i++)
		nw64_mac(XMAC_HASH_TBL(i), 0);
	nw64_mac(XRXMAC_STAT_MSK, ~(u64)0);
	niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
	niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);

	val = nr64_mac(XMAC_CONFIG);
	val &= ~(XMAC_CONFIG_RX_MAC_ENABLE |
		 XMAC_CONFIG_PROMISCUOUS |
		 XMAC_CONFIG_PROMISC_GROUP |
		 XMAC_CONFIG_ERR_CHK_DIS |
		 XMAC_CONFIG_RX_CRC_CHK_DIS |
		 XMAC_CONFIG_RESERVED_MULTICAST |
		 XMAC_CONFIG_RX_CODEV_CHK_DIS |
		 XMAC_CONFIG_ADDR_FILTER_EN |
		 XMAC_CONFIG_RCV_PAUSE_ENABLE |
		 XMAC_CONFIG_STRIP_CRC |
		 XMAC_CONFIG_PASS_FLOW_CTRL |
		 XMAC_CONFIG_MAC2IPP_PKT_CNT_EN);
	val |= (XMAC_CONFIG_HASH_FILTER_EN);
	nw64_mac(XMAC_CONFIG, val);

	/* Zero all RX statistics counters. */
	nw64_mac(RXMAC_BT_CNT, 0);
	nw64_mac(RXMAC_BC_FRM_CNT, 0);
	nw64_mac(RXMAC_MC_FRM_CNT, 0);
	nw64_mac(RXMAC_FRAG_CNT, 0);
	nw64_mac(RXMAC_HIST_CNT1, 0);
	nw64_mac(RXMAC_HIST_CNT2, 0);
	nw64_mac(RXMAC_HIST_CNT3, 0);
	nw64_mac(RXMAC_HIST_CNT4, 0);
	nw64_mac(RXMAC_HIST_CNT5, 0);
	nw64_mac(RXMAC_HIST_CNT6, 0);
	nw64_mac(RXMAC_HIST_CNT7, 0);
	nw64_mac(RXMAC_MPSZER_CNT, 0);
	nw64_mac(RXMAC_CRC_ER_CNT, 0);
	nw64_mac(RXMAC_CD_VIO_CNT, 0);
	nw64_mac(LINK_FAULT_CNT, 0);
}
5259
/* Program the BMAC RX side: clear the address filters and hash table,
 * point the primary and multicast MACs at this port's first RDC table,
 * mask status interrupts, set a clean BRXMAC config (RX left disabled
 * here; enabled later by niu_enable_rx_mac()) with only hash filtering
 * on, and enable compare slot 0 in the address-compare register.
 */
static void niu_init_rx_bmac(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
	int first_rdc_table = tp->first_table_num;
	unsigned long i;
	u64 val;

	nw64_mac(BMAC_ADD_FILT0, 0);
	nw64_mac(BMAC_ADD_FILT1, 0);
	nw64_mac(BMAC_ADD_FILT2, 0);
	nw64_mac(BMAC_ADD_FILT12_MASK, 0);
	nw64_mac(BMAC_ADD_FILT00_MASK, 0);
	for (i = 0; i < MAC_NUM_HASH; i++)
		nw64_mac(BMAC_HASH_TBL(i), 0);
	niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
	niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
	nw64_mac(BRXMAC_STATUS_MASK, ~(u64)0);

	val = nr64_mac(BRXMAC_CONFIG);
	val &= ~(BRXMAC_CONFIG_ENABLE |
		 BRXMAC_CONFIG_STRIP_PAD |
		 BRXMAC_CONFIG_STRIP_FCS |
		 BRXMAC_CONFIG_PROMISC |
		 BRXMAC_CONFIG_PROMISC_GRP |
		 BRXMAC_CONFIG_ADDR_FILT_EN |
		 BRXMAC_CONFIG_DISCARD_DIS);
	val |= (BRXMAC_CONFIG_HASH_FILT_EN);
	nw64_mac(BRXMAC_CONFIG, val);

	val = nr64_mac(BMAC_ADDR_CMPEN);
	val |= BMAC_ADDR_CMPEN_EN0;
	nw64_mac(BMAC_ADDR_CMPEN, val);
}
5294
5295 static void niu_init_rx_mac(struct niu *np)
5296 {
5297         niu_set_primary_mac(np, np->dev->dev_addr);
5298
5299         if (np->flags & NIU_FLAGS_XMAC)
5300                 niu_init_rx_xmac(np);
5301         else
5302                 niu_init_rx_bmac(np);
5303 }
5304
5305 static void niu_enable_tx_xmac(struct niu *np, int on)
5306 {
5307         u64 val = nr64_mac(XMAC_CONFIG);
5308
5309         if (on)
5310                 val |= XMAC_CONFIG_TX_ENABLE;
5311         else
5312                 val &= ~XMAC_CONFIG_TX_ENABLE;
5313         nw64_mac(XMAC_CONFIG, val);
5314 }
5315
5316 static void niu_enable_tx_bmac(struct niu *np, int on)
5317 {
5318         u64 val = nr64_mac(BTXMAC_CONFIG);
5319
5320         if (on)
5321                 val |= BTXMAC_CONFIG_ENABLE;
5322         else
5323                 val &= ~BTXMAC_CONFIG_ENABLE;
5324         nw64_mac(BTXMAC_CONFIG, val);
5325 }
5326
5327 static void niu_enable_tx_mac(struct niu *np, int on)
5328 {
5329         if (np->flags & NIU_FLAGS_XMAC)
5330                 niu_enable_tx_xmac(np, on);
5331         else
5332                 niu_enable_tx_bmac(np, on);
5333 }
5334
5335 static void niu_enable_rx_xmac(struct niu *np, int on)
5336 {
5337         u64 val = nr64_mac(XMAC_CONFIG);
5338
5339         val &= ~(XMAC_CONFIG_HASH_FILTER_EN |
5340                  XMAC_CONFIG_PROMISCUOUS);
5341
5342         if (np->flags & NIU_FLAGS_MCAST)
5343                 val |= XMAC_CONFIG_HASH_FILTER_EN;
5344         if (np->flags & NIU_FLAGS_PROMISC)
5345                 val |= XMAC_CONFIG_PROMISCUOUS;
5346
5347         if (on)
5348                 val |= XMAC_CONFIG_RX_MAC_ENABLE;
5349         else
5350                 val &= ~XMAC_CONFIG_RX_MAC_ENABLE;
5351         nw64_mac(XMAC_CONFIG, val);
5352 }
5353
5354 static void niu_enable_rx_bmac(struct niu *np, int on)
5355 {
5356         u64 val = nr64_mac(BRXMAC_CONFIG);
5357
5358         val &= ~(BRXMAC_CONFIG_HASH_FILT_EN |
5359                  BRXMAC_CONFIG_PROMISC);
5360
5361         if (np->flags & NIU_FLAGS_MCAST)
5362                 val |= BRXMAC_CONFIG_HASH_FILT_EN;
5363         if (np->flags & NIU_FLAGS_PROMISC)
5364                 val |= BRXMAC_CONFIG_PROMISC;
5365
5366         if (on)
5367                 val |= BRXMAC_CONFIG_ENABLE;
5368         else
5369                 val &= ~BRXMAC_CONFIG_ENABLE;
5370         nw64_mac(BRXMAC_CONFIG, val);
5371 }
5372
5373 static void niu_enable_rx_mac(struct niu *np, int on)
5374 {
5375         if (np->flags & NIU_FLAGS_XMAC)
5376                 niu_enable_rx_xmac(np, on);
5377         else
5378                 niu_enable_rx_bmac(np, on);
5379 }
5380
/* Bring the whole MAC up: XIF, PCS, then reset+init of the TX and RX
 * MACs, finishing with TX/RX enable.  The sequence is order-sensitive;
 * see the comment below about the repeated niu_init_tx_mac() call.
 *
 * Returns 0 on success or the first failing step's negative errno.
 */
static int niu_init_mac(struct niu *np)
{
	int err;

	niu_init_xif(np);
	err = niu_init_pcs(np);
	if (err)
		return err;

	err = niu_reset_tx_mac(np);
	if (err)
		return err;
	niu_init_tx_mac(np);
	err = niu_reset_rx_mac(np);
	if (err)
		return err;
	niu_init_rx_mac(np);

	/* This looks hookey but the RX MAC reset we just did will
	 * undo some of the state we setup in niu_init_tx_mac() so we
	 * have to call it again.  In particular, the RX MAC reset will
	 * set the XMAC_MAX register back to it's default value.
	 */
	niu_init_tx_mac(np);
	niu_enable_tx_mac(np, 1);

	niu_enable_rx_mac(np, 1);

	return 0;
}
5411
/* Stop one TX DMA channel; the stop's return value is deliberately
 * ignored since this is used on teardown paths.
 */
static void niu_stop_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
{
	(void) niu_tx_channel_stop(np, rp->tx_channel);
}
5416
5417 static void niu_stop_tx_channels(struct niu *np)
5418 {
5419         int i;
5420
5421         for (i = 0; i < np->num_tx_rings; i++) {
5422                 struct tx_ring_info *rp = &np->tx_rings[i];
5423
5424                 niu_stop_one_tx_channel(np, rp);
5425         }
5426 }
5427
/* Reset one TX DMA channel; the reset's return value is deliberately
 * ignored since this is used on teardown paths.
 */
static void niu_reset_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
{
	(void) niu_tx_channel_reset(np, rp->tx_channel);
}
5432
5433 static void niu_reset_tx_channels(struct niu *np)
5434 {
5435         int i;
5436
5437         for (i = 0; i < np->num_tx_rings; i++) {
5438                 struct tx_ring_info *rp = &np->tx_rings[i];
5439
5440                 niu_reset_one_tx_channel(np, rp);
5441         }
5442 }
5443
5444 static void niu_stop_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
5445 {
5446         (void) niu_enable_rx_channel(np, rp->rx_channel, 0);
5447 }
5448
5449 static void niu_stop_rx_channels(struct niu *np)
5450 {
5451         int i;
5452
5453         for (i = 0; i < np->num_rx_rings; i++) {
5454                 struct rx_ring_info *rp = &np->rx_rings[i];
5455
5456                 niu_stop_one_rx_channel(np, rp);
5457         }
5458 }
5459
/* Fully quiesce one RX DMA channel: reset it, mask all of its event
 * interrupts, clear its control/status word, and leave the channel
 * disabled.  The register write order is deliberate; keep it as-is.
 */
static void niu_reset_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
{
	int channel = rp->rx_channel;

	(void) niu_rx_channel_reset(np, channel);
	nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_ALL);
	nw64(RX_DMA_CTL_STAT(channel), 0);
	(void) niu_enable_rx_channel(np, channel, 0);
}
5469
5470 static void niu_reset_rx_channels(struct niu *np)
5471 {
5472         int i;
5473
5474         for (i = 0; i < np->num_rx_rings; i++) {
5475                 struct rx_ring_info *rp = &np->rx_rings[i];
5476
5477                 niu_reset_one_rx_channel(np, rp);
5478         }
5479 }
5480
/* Disable the IPP (input packet processor) block.
 *
 * First poll (up to 100 reads) for the DFIFO read and write pointers
 * to converge, i.e. for the FIFO to drain.  If they never do, log the
 * stuck pointer values but continue the teardown anyway.  Then clear
 * the IPP enable/feature bits and reset the block, ignoring any reset
 * failure since we are shutting down.
 */
static void niu_disable_ipp(struct niu *np)
{
	u64 rd, wr, val;
	int limit;

	rd = nr64_ipp(IPP_DFIFO_RD_PTR);
	wr = nr64_ipp(IPP_DFIFO_WR_PTR);
	limit = 100;
	while (--limit >= 0 && (rd != wr)) {
		rd = nr64_ipp(IPP_DFIFO_RD_PTR);
		wr = nr64_ipp(IPP_DFIFO_WR_PTR);
	}
	/* NOTE(review): rd==0/wr==1 is apparently treated as an
	 * acceptable quiescent state here -- confirm against the PRM.
	 */
	if (limit < 0 &&
	    (rd != 0 && wr != 1)) {
		dev_err(np->device, PFX "%s: IPP would not quiesce, "
			"rd_ptr[%llx] wr_ptr[%llx]\n",
			np->dev->name,
			(unsigned long long) nr64_ipp(IPP_DFIFO_RD_PTR),
			(unsigned long long) nr64_ipp(IPP_DFIFO_WR_PTR));
	}

	val = nr64_ipp(IPP_CFIG);
	val &= ~(IPP_CFIG_IPP_ENABLE |
		 IPP_CFIG_DFIFO_ECC_EN |
		 IPP_CFIG_DROP_BAD_CRC |
		 IPP_CFIG_CKSUM_EN);
	nw64_ipp(IPP_CFIG, val);

	(void) niu_ipp_reset(np);
}
5511
/* Full hardware bring-up: TXC, TX channels, RX channels, classifier,
 * ZCP, IPP, and finally the MAC.  On failure, everything brought up
 * so far is torn down via the fall-through goto ladder below (each
 * label undoes its stage and falls into the next), and the first
 * error code is returned.
 *
 * Caller holds np->lock.
 */
static int niu_init_hw(struct niu *np)
{
	int i, err;

	niudbg(IFUP, "%s: Initialize TXC\n", np->dev->name);
	niu_txc_enable_port(np, 1);
	niu_txc_port_dma_enable(np, 1);
	niu_txc_set_imask(np, 0);

	niudbg(IFUP, "%s: Initialize TX channels\n", np->dev->name);
	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &np->tx_rings[i];

		err = niu_init_one_tx_channel(np, rp);
		if (err)
			return err;
	}

	niudbg(IFUP, "%s: Initialize RX channels\n", np->dev->name);
	err = niu_init_rx_channels(np);
	if (err)
		goto out_uninit_tx_channels;

	niudbg(IFUP, "%s: Initialize classifier\n", np->dev->name);
	err = niu_init_classifier_hw(np);
	if (err)
		goto out_uninit_rx_channels;

	niudbg(IFUP, "%s: Initialize ZCP\n", np->dev->name);
	err = niu_init_zcp(np);
	if (err)
		goto out_uninit_rx_channels;

	niudbg(IFUP, "%s: Initialize IPP\n", np->dev->name);
	err = niu_init_ipp(np);
	if (err)
		goto out_uninit_rx_channels;

	niudbg(IFUP, "%s: Initialize MAC\n", np->dev->name);
	err = niu_init_mac(np);
	if (err)
		goto out_uninit_ipp;

	return 0;

out_uninit_ipp:
	niudbg(IFUP, "%s: Uninit IPP\n", np->dev->name);
	niu_disable_ipp(np);

out_uninit_rx_channels:
	niudbg(IFUP, "%s: Uninit RX channels\n", np->dev->name);
	niu_stop_rx_channels(np);
	niu_reset_rx_channels(np);

out_uninit_tx_channels:
	niudbg(IFUP, "%s: Uninit TX channels\n", np->dev->name);
	niu_stop_tx_channels(np);
	niu_reset_tx_channels(np);

	return err;
}
5573
/* Quiesce the hardware: interrupts off, RX MAC and IPP disabled,
 * then stop and reset all TX and RX DMA channels.  The ordering
 * (stop both directions before resetting either) is deliberate.
 *
 * Caller holds np->lock.
 */
static void niu_stop_hw(struct niu *np)
{
	niudbg(IFDOWN, "%s: Disable interrupts\n", np->dev->name);
	niu_enable_interrupts(np, 0);

	niudbg(IFDOWN, "%s: Disable RX MAC\n", np->dev->name);
	niu_enable_rx_mac(np, 0);

	niudbg(IFDOWN, "%s: Disable IPP\n", np->dev->name);
	niu_disable_ipp(np);

	niudbg(IFDOWN, "%s: Stop TX channels\n", np->dev->name);
	niu_stop_tx_channels(np);

	niudbg(IFDOWN, "%s: Stop RX channels\n", np->dev->name);
	niu_stop_rx_channels(np);

	niudbg(IFDOWN, "%s: Reset TX channels\n", np->dev->name);
	niu_reset_tx_channels(np);

	niudbg(IFDOWN, "%s: Reset RX channels\n", np->dev->name);
	niu_reset_rx_channels(np);
}
5597
5598 static int niu_request_irq(struct niu *np)
5599 {
5600         int i, j, err;
5601
5602         err = 0;
5603         for (i = 0; i < np->num_ldg; i++) {
5604                 struct niu_ldg *lp = &np->ldg[i];
5605
5606                 err = request_irq(lp->irq, niu_interrupt,
5607                                   IRQF_SHARED | IRQF_SAMPLE_RANDOM,
5608                                   np->dev->name, lp);
5609                 if (err)
5610                         goto out_free_irqs;
5611
5612         }
5613
5614         return 0;
5615
5616 out_free_irqs:
5617         for (j = 0; j < i; j++) {
5618                 struct niu_ldg *lp = &np->ldg[j];
5619
5620                 free_irq(lp->irq, lp);
5621         }
5622         return err;
5623 }
5624
5625 static void niu_free_irq(struct niu *np)
5626 {
5627         int i;
5628
5629         for (i = 0; i < np->num_ldg; i++) {
5630                 struct niu_ldg *lp = &np->ldg[i];
5631
5632                 free_irq(lp->irq, lp);
5633         }
5634 }
5635
5636 static void niu_enable_napi(struct niu *np)
5637 {
5638         int i;
5639
5640         for (i = 0; i < np->num_ldg; i++)
5641                 napi_enable(&np->ldg[i].napi);
5642 }
5643
5644 static void niu_disable_napi(struct niu *np)
5645 {
5646         int i;
5647
5648         for (i = 0; i < np->num_ldg; i++)
5649                 napi_disable(&np->ldg[i].napi);
5650 }
5651
/* net_device open hook: allocate rings, request IRQs, initialize the
 * hardware under np->lock, arm the 1-second service timer, and start
 * the TX queues.  Each failure path unwinds exactly what was set up.
 */
static int niu_open(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);
	int err;

	netif_carrier_off(dev);

	err = niu_alloc_channels(np);
	if (err)
		goto out_err;

	/* Make sure interrupts are masked before IRQs are wired up. */
	err = niu_enable_interrupts(np, 0);
	if (err)
		goto out_free_channels;

	err = niu_request_irq(np);
	if (err)
		goto out_free_channels;

	niu_enable_napi(np);

	spin_lock_irq(&np->lock);

	err = niu_init_hw(np);
	if (!err) {
		/* Timer is only armed (add_timer) once we are sure
		 * bring-up fully succeeded, below.
		 */
		init_timer(&np->timer);
		np->timer.expires = jiffies + HZ;
		np->timer.data = (unsigned long) np;
		np->timer.function = niu_timer;

		err = niu_enable_interrupts(np, 1);
		if (err)
			niu_stop_hw(np);
	}

	spin_unlock_irq(&np->lock);

	if (err) {
		niu_disable_napi(np);
		goto out_free_irq;
	}

	netif_tx_start_all_queues(dev);

	if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
		netif_carrier_on(dev);

	add_timer(&np->timer);

	return 0;

out_free_irq:
	niu_free_irq(np);

out_free_channels:
	niu_free_channels(np);

out_err:
	return err;
}
5712
/* Quiesce the software datapath (reset task, NAPI, TX queues, timer)
 * and then stop the hardware under np->lock.  Used by close and by
 * the MTU-change path; does not free rings or IRQs.
 */
static void niu_full_shutdown(struct niu *np, struct net_device *dev)
{
	cancel_work_sync(&np->reset_task);

	niu_disable_napi(np);
	netif_tx_stop_all_queues(dev);

	del_timer_sync(&np->timer);

	spin_lock_irq(&np->lock);

	niu_stop_hw(np);

	spin_unlock_irq(&np->lock);
}
5728
/* net_device stop hook: shut the device down, release IRQs and ring
 * memory, then turn the activity LED off.
 */
static int niu_close(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);

	niu_full_shutdown(np, dev);
	niu_free_irq(np);
	niu_free_channels(np);
	niu_handle_led(np, 0);

	return 0;
}
5743
/* Fold the XMAC hardware counters into the software statistics.
 * Counters accumulate with += since they are harvested periodically;
 * presumably the hardware counters clear or saturate between reads --
 * TODO(review): confirm counter semantics against the PRM.
 */
static void niu_sync_xmac_stats(struct niu *np)
{
	struct niu_xmac_stats *mp = &np->mac_stats.xmac;

	mp->tx_frames += nr64_mac(TXMAC_FRM_CNT);
	mp->tx_bytes += nr64_mac(TXMAC_BYTE_CNT);

	mp->rx_link_faults += nr64_mac(LINK_FAULT_CNT);
	mp->rx_align_errors += nr64_mac(RXMAC_ALIGN_ERR_CNT);
	mp->rx_frags += nr64_mac(RXMAC_FRAG_CNT);
	mp->rx_mcasts += nr64_mac(RXMAC_MC_FRM_CNT);
	mp->rx_bcasts += nr64_mac(RXMAC_BC_FRM_CNT);
	mp->rx_hist_cnt1 += nr64_mac(RXMAC_HIST_CNT1);
	mp->rx_hist_cnt2 += nr64_mac(RXMAC_HIST_CNT2);
	mp->rx_hist_cnt3 += nr64_mac(RXMAC_HIST_CNT3);
	mp->rx_hist_cnt4 += nr64_mac(RXMAC_HIST_CNT4);
	mp->rx_hist_cnt5 += nr64_mac(RXMAC_HIST_CNT5);
	mp->rx_hist_cnt6 += nr64_mac(RXMAC_HIST_CNT6);
	mp->rx_hist_cnt7 += nr64_mac(RXMAC_HIST_CNT7);
	mp->rx_octets += nr64_mac(RXMAC_BT_CNT);
	mp->rx_code_violations += nr64_mac(RXMAC_CD_VIO_CNT);
	mp->rx_len_errors += nr64_mac(RXMAC_MPSZER_CNT);
	mp->rx_crc_errors += nr64_mac(RXMAC_CRC_ER_CNT);
}
5768
5769 static void niu_sync_bmac_stats(struct niu *np)
5770 {
5771         struct niu_bmac_stats *mp = &np->mac_stats.bmac;
5772
5773         mp->tx_bytes += nr64_mac(BTXMAC_BYTE_CNT);
5774         mp->tx_frames += nr64_mac(BTXMAC_FRM_CNT);
5775
5776         mp->rx_frames += nr64_mac(BRXMAC_FRAME_CNT);
5777         mp->rx_align_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
5778         mp->rx_crc_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
5779         mp->rx_len_errors += nr64_mac(BRXMAC_CODE_VIOL_ERR_CNT);
5780 }
5781
5782 static void niu_sync_mac_stats(struct niu *np)
5783 {
5784         if (np->flags & NIU_FLAGS_XMAC)
5785                 niu_sync_xmac_stats(np);
5786         else
5787                 niu_sync_bmac_stats(np);
5788 }
5789
5790 static void niu_get_rx_stats(struct niu *np)
5791 {
5792         unsigned long pkts, dropped, errors, bytes;
5793         int i;
5794
5795         pkts = dropped = errors = bytes = 0;
5796         for (i = 0; i < np->num_rx_rings; i++) {
5797                 struct rx_ring_info *rp = &np->rx_rings[i];
5798
5799                 pkts += rp->rx_packets;
5800                 bytes += rp->rx_bytes;
5801                 dropped += rp->rx_dropped;
5802                 errors += rp->rx_errors;
5803         }
5804         np->net_stats.rx_packets = pkts;
5805         np->net_stats.rx_bytes = bytes;
5806         np->net_stats.rx_dropped = dropped;
5807         np->net_stats.rx_errors = errors;
5808 }
5809
5810 static void niu_get_tx_stats(struct niu *np)
5811 {
5812         unsigned long pkts, errors, bytes;
5813         int i;
5814
5815         pkts = errors = bytes = 0;
5816         for (i = 0; i < np->num_tx_rings; i++) {
5817                 struct tx_ring_info *rp = &np->tx_rings[i];
5818
5819                 pkts += rp->tx_packets;
5820                 bytes += rp->tx_bytes;
5821                 errors += rp->tx_errors;
5822         }
5823         np->net_stats.tx_packets = pkts;
5824         np->net_stats.tx_bytes = bytes;
5825         np->net_stats.tx_errors = errors;
5826 }
5827
5828 static struct net_device_stats *niu_get_stats(struct net_device *dev)
5829 {
5830         struct niu *np = netdev_priv(dev);
5831
5832         niu_get_rx_stats(np);
5833         niu_get_tx_stats(np);
5834
5835         return &np->net_stats;
5836 }
5837
5838 static void niu_load_hash_xmac(struct niu *np, u16 *hash)
5839 {
5840         int i;
5841
5842         for (i = 0; i < 16; i++)
5843                 nw64_mac(XMAC_HASH_TBL(i), hash[i]);
5844 }
5845
5846 static void niu_load_hash_bmac(struct niu *np, u16 *hash)
5847 {
5848         int i;
5849
5850         for (i = 0; i < 16; i++)
5851                 nw64_mac(BMAC_HASH_TBL(i), hash[i]);
5852 }
5853
5854 static void niu_load_hash(struct niu *np, u16 *hash)
5855 {
5856         if (np->flags & NIU_FLAGS_XMAC)
5857                 niu_load_hash_xmac(np, hash);
5858         else
5859                 niu_load_hash_bmac(np, hash);
5860 }
5861
/* net_device set_rx_mode hook: reprogram promiscuity, the alternate
 * (unicast) MAC address slots, and the multicast hash, with the RX
 * MAC disabled for the duration so no frames see half-updated state.
 */
static void niu_set_rx_mode(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);
	int i, alt_cnt, err;
	struct dev_addr_list *addr;
	unsigned long flags;
	u16 hash[16] = { 0, };

	spin_lock_irqsave(&np->lock, flags);
	niu_enable_rx_mac(np, 0);

	np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC);
	if (dev->flags & IFF_PROMISC)
		np->flags |= NIU_FLAGS_PROMISC;
	if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 0))
		np->flags |= NIU_FLAGS_MCAST;

	/* More secondary unicast addresses than alt-MAC slots forces
	 * promiscuous mode.
	 */
	alt_cnt = dev->uc_count;
	if (alt_cnt > niu_num_alt_addr(np)) {
		alt_cnt = 0;
		np->flags |= NIU_FLAGS_PROMISC;
	}

	if (alt_cnt) {
		int index = 0;

		/* Programming errors are logged but not fatal. */
		for (addr = dev->uc_list; addr; addr = addr->next) {
			err = niu_set_alt_mac(np, index,
					      addr->da_addr);
			if (err)
				printk(KERN_WARNING PFX "%s: Error %d "
				       "adding alt mac %d\n",
				       dev->name, err, index);
			err = niu_enable_alt_mac(np, index, 1);
			if (err)
				printk(KERN_WARNING PFX "%s: Error %d "
				       "enabling alt mac %d\n",
				       dev->name, err, index);

			index++;
		}
	} else {
		/* No alt addresses in use: disable every slot.
		 * NOTE(review): BMAC apparently reserves slot 0, hence
		 * alt_start = 1 -- confirm against the PRM.
		 */
		int alt_start;
		if (np->flags & NIU_FLAGS_XMAC)
			alt_start = 0;
		else
			alt_start = 1;
		for (i = alt_start; i < niu_num_alt_addr(np); i++) {
			err = niu_enable_alt_mac(np, i, 0);
			if (err)
				printk(KERN_WARNING PFX "%s: Error %d "
				       "disabling alt mac %d\n",
				       dev->name, err, i);
		}
	}
	if (dev->flags & IFF_ALLMULTI) {
		/* All-ones hash accepts every multicast frame. */
		for (i = 0; i < 16; i++)
			hash[i] = 0xffff;
	} else if (dev->mc_count > 0) {
		/* Top 8 CRC bits select one of the 256 hash bits. */
		for (addr = dev->mc_list; addr; addr = addr->next) {
			u32 crc = ether_crc_le(ETH_ALEN, addr->da_addr);

			crc >>= 24;
			hash[crc >> 4] |= (1 << (15 - (crc & 0xf)));
		}
	}

	if (np->flags & NIU_FLAGS_MCAST)
		niu_load_hash(np, hash);

	niu_enable_rx_mac(np, 1);
	spin_unlock_irqrestore(&np->lock, flags);
}
5935
5936 static int niu_set_mac_addr(struct net_device *dev, void *p)
5937 {
5938         struct niu *np = netdev_priv(dev);
5939         struct sockaddr *addr = p;
5940         unsigned long flags;
5941
5942         if (!is_valid_ether_addr(addr->sa_data))
5943                 return -EINVAL;
5944
5945         memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
5946
5947         if (!netif_running(dev))
5948                 return 0;
5949
5950         spin_lock_irqsave(&np->lock, flags);
5951         niu_enable_rx_mac(np, 0);
5952         niu_set_primary_mac(np, dev->dev_addr);
5953         niu_enable_rx_mac(np, 1);
5954         spin_unlock_irqrestore(&np->lock, flags);
5955
5956         return 0;
5957 }
5958
5959 static int niu_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5960 {
5961         return -EOPNOTSUPP;
5962 }
5963
5964 static void niu_netif_stop(struct niu *np)
5965 {
5966         np->dev->trans_start = jiffies; /* prevent tx timeout */
5967
5968         niu_disable_napi(np);
5969
5970         netif_tx_disable(np->dev);
5971 }
5972
/* Restart the software datapath after a reset: wake TX queues,
 * re-enable NAPI, and unmask interrupts.
 */
static void niu_netif_start(struct niu *np)
{
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after niu_init_hw).
	 */
	netif_tx_wake_all_queues(np->dev);

	niu_enable_napi(np);

	niu_enable_interrupts(np, 1);
}
5985
/* Rebuild RX and TX ring state after a chip reset, reusing the RX
 * pages still held on the rxhash chains and dropping any TX packets
 * that were in flight.  Caller must have stopped the hardware first.
 */
static void niu_reset_buffers(struct niu *np)
{
	int i, j, k, err;

	if (np->rx_rings) {
		for (i = 0; i < np->num_rx_rings; i++) {
			struct rx_ring_info *rp = &np->rx_rings[i];

			/* Re-enter every page chained off rxhash into
			 * the RBR table; page->mapping doubles as the
			 * chain link and page->index holds the DMA base.
			 */
			for (j = 0, k = 0; j < MAX_RBR_RING_SIZE; j++) {
				struct page *page;

				page = rp->rxhash[j];
				while (page) {
					struct page *next =
						(struct page *) page->mapping;
					u64 base = page->index;
					base = base >> RBR_DESCR_ADDR_SHIFT;
					rp->rbr[k++] = cpu_to_le32(base);
					page = next;
				}
			}
			/* Top the table up with freshly allocated pages;
			 * a GFP_ATOMIC failure just leaves it short.
			 */
			for (; k < MAX_RBR_RING_SIZE; k++) {
				err = niu_rbr_add_page(np, rp, GFP_ATOMIC, k);
				if (unlikely(err))
					break;
			}

			rp->rbr_index = rp->rbr_table_size - 1;
			rp->rcr_index = 0;
			rp->rbr_pending = 0;
			rp->rbr_refill_pending = 0;
		}
	}
	if (np->tx_rings) {
		for (i = 0; i < np->num_tx_rings; i++) {
			struct tx_ring_info *rp = &np->tx_rings[i];

			/* Free every skb still queued for transmit. */
			for (j = 0; j < MAX_TX_RING_SIZE; j++) {
				if (rp->tx_buffs[j].skb)
					(void) release_tx_packet(np, rp, j);
			}

			rp->pending = MAX_TX_RING_SIZE;
			rp->prod = 0;
			rp->cons = 0;
			rp->wrap_bit = 0;
		}
	}
}
6035
/* Workqueue handler behind niu_tx_timeout: stop the datapath and the
 * hardware, rebuild the ring buffers, and bring the hardware back up.
 * np->lock is dropped around the sleeping/allocating steps
 * (del_timer_sync, niu_netif_stop, niu_reset_buffers).
 */
static void niu_reset_task(struct work_struct *work)
{
	struct niu *np = container_of(work, struct niu, reset_task);
	unsigned long flags;
	int err;

	spin_lock_irqsave(&np->lock, flags);
	/* Device went down since the work was scheduled -- nothing to do. */
	if (!netif_running(np->dev)) {
		spin_unlock_irqrestore(&np->lock, flags);
		return;
	}

	spin_unlock_irqrestore(&np->lock, flags);

	del_timer_sync(&np->timer);

	niu_netif_stop(np);

	spin_lock_irqsave(&np->lock, flags);

	niu_stop_hw(np);

	spin_unlock_irqrestore(&np->lock, flags);

	niu_reset_buffers(np);

	spin_lock_irqsave(&np->lock, flags);

	/* If re-init fails the device is left stopped. */
	err = niu_init_hw(np);
	if (!err) {
		np->timer.expires = jiffies + HZ;
		add_timer(&np->timer);
		niu_netif_start(np);
	}

	spin_unlock_irqrestore(&np->lock, flags);
}
6073
6074 static void niu_tx_timeout(struct net_device *dev)
6075 {
6076         struct niu *np = netdev_priv(dev);
6077
6078         dev_err(np->device, PFX "%s: Transmit timed out, resetting\n",
6079                 dev->name);
6080
6081         schedule_work(&np->reset_task);
6082 }
6083
6084 static void niu_set_txd(struct tx_ring_info *rp, int index,
6085                         u64 mapping, u64 len, u64 mark,
6086                         u64 n_frags)
6087 {
6088         __le64 *desc = &rp->descr[index];
6089
6090         *desc = cpu_to_le64(mark |
6091                             (n_frags << TX_DESC_NUM_PTR_SHIFT) |
6092                             (len << TX_DESC_TR_LEN_SHIFT) |
6093                             (mapping & TX_DESC_SAD));
6094 }
6095
/* Build the 64-bit NIU TX packet header word for @skb.
 *
 * @ehdr points at the ethernet header (captured before the tx_pkt_hdr
 * was pushed), @pad_bytes is the alignment padding in front of the
 * header, and @len is the payload length to encode.  Byte offsets are
 * encoded in 16-bit units, hence the "/ 2" arithmetic.
 */
static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr,
				u64 pad_bytes, u64 len)
{
	u16 eth_proto, eth_proto_inner;
	u64 csum_bits, l3off, ihl, ret;
	u8 ip_proto;
	int ipv6;

	eth_proto = be16_to_cpu(ehdr->h_proto);
	eth_proto_inner = eth_proto;
	if (eth_proto == ETH_P_8021Q) {
		/* For VLAN frames classify on the encapsulated protocol. */
		struct vlan_ethhdr *vp = (struct vlan_ethhdr *) ehdr;
		__be16 val = vp->h_vlan_encapsulated_proto;

		eth_proto_inner = be16_to_cpu(val);
	}

	ipv6 = ihl = 0;
	switch (skb->protocol) {
	case __constant_htons(ETH_P_IP):
		ip_proto = ip_hdr(skb)->protocol;
		ihl = ip_hdr(skb)->ihl;
		break;
	case __constant_htons(ETH_P_IPV6):
		ip_proto = ipv6_hdr(skb)->nexthdr;
		ihl = (40 >> 2);	/* fixed IPv6 header, in 32-bit words */
		ipv6 = 1;
		break;
	default:
		ip_proto = ihl = 0;
		break;
	}

	csum_bits = TXHDR_CSUM_NONE;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		u64 start, stuff;

		csum_bits = (ip_proto == IPPROTO_TCP ?
			     TXHDR_CSUM_TCP :
			     (ip_proto == IPPROTO_UDP ?
			      TXHDR_CSUM_UDP : TXHDR_CSUM_SCTP));

		/* Offsets are relative to the start of the NIU header,
		 * which sits pad_bytes + sizeof(tx_pkt_hdr) before the
		 * ethernet header.
		 */
		start = skb_transport_offset(skb) -
			(pad_bytes + sizeof(struct tx_pkt_hdr));
		stuff = start + skb->csum_offset;

		csum_bits |= (start / 2) << TXHDR_L4START_SHIFT;
		csum_bits |= (stuff / 2) << TXHDR_L4STUFF_SHIFT;
	}

	l3off = skb_network_offset(skb) -
		(pad_bytes + sizeof(struct tx_pkt_hdr));

	ret = (((pad_bytes / 2) << TXHDR_PAD_SHIFT) |
	       (len << TXHDR_LEN_SHIFT) |
	       ((l3off / 2) << TXHDR_L3START_SHIFT) |
	       (ihl << TXHDR_IHL_SHIFT) |
	       ((eth_proto_inner < 1536) ? TXHDR_LLC : 0) |
	       ((eth_proto == ETH_P_8021Q) ? TXHDR_VLAN : 0) |
	       (ipv6 ? TXHDR_IP_VER : 0) |
	       csum_bits);

	return ret;
}
6160
6161 static int niu_start_xmit(struct sk_buff *skb, struct net_device *dev)
6162 {
6163         struct niu *np = netdev_priv(dev);
6164         unsigned long align, headroom;
6165         struct netdev_queue *txq;
6166         struct tx_ring_info *rp;
6167         struct tx_pkt_hdr *tp;
6168         unsigned int len, nfg;
6169         struct ethhdr *ehdr;
6170         int prod, i, tlen;
6171         u64 mapping, mrk;
6172
6173         i = skb_get_queue_mapping(skb);
6174         rp = &np->tx_rings[i];
6175         txq = netdev_get_tx_queue(dev, i);
6176
6177         if (niu_tx_avail(rp) <= (skb_shinfo(skb)->nr_frags + 1)) {
6178                 netif_tx_stop_queue(txq);
6179                 dev_err(np->device, PFX "%s: BUG! Tx ring full when "
6180                         "queue awake!\n", dev->name);
6181                 rp->tx_errors++;
6182                 return NETDEV_TX_BUSY;
6183         }
6184
6185         if (skb->len < ETH_ZLEN) {
6186                 unsigned int pad_bytes = ETH_ZLEN - skb->len;
6187
6188                 if (skb_pad(skb, pad_bytes))
6189                         goto out;
6190                 skb_put(skb, pad_bytes);
6191         }
6192
6193         len = sizeof(struct tx_pkt_hdr) + 15;
6194         if (skb_headroom(skb) < len) {
6195                 struct sk_buff *skb_new;
6196
6197                 skb_new = skb_realloc_headroom(skb, len);
6198                 if (!skb_new) {
6199                         rp->tx_errors++;
6200                         goto out_drop;
6201                 }
6202                 kfree_skb(skb);
6203                 skb = skb_new;
6204         } else
6205                 skb_orphan(skb);
6206
6207         align = ((unsigned long) skb->data & (16 - 1));
6208         headroom = align + sizeof(struct tx_pkt_hdr);
6209
6210         ehdr = (struct ethhdr *) skb->data;
6211         tp = (struct tx_pkt_hdr *) skb_push(skb, headroom);
6212
6213         len = skb->len - sizeof(struct tx_pkt_hdr);
6214         tp->flags = cpu_to_le64(niu_compute_tx_flags(skb, ehdr, align, len));
6215         tp->resv = 0;
6216
6217         len = skb_headlen(skb);
6218         mapping = np->ops->map_single(np->device, skb->data,
6219                                       len, DMA_TO_DEVICE);
6220
6221         prod = rp->prod;
6222
6223         rp->tx_buffs[prod].skb = skb;
6224         rp->tx_buffs[prod].mapping = mapping;
6225
6226         mrk = TX_DESC_SOP;
6227         if (++rp->mark_counter == rp->mark_freq) {
6228                 rp->mark_counter = 0;
6229                 mrk |= TX_DESC_MARK;
6230                 rp->mark_pending++;
6231         }
6232
6233         tlen = len;
6234         nfg = skb_shinfo(skb)->nr_frags;
6235         while (tlen > 0) {
6236                 tlen -= MAX_TX_DESC_LEN;
6237                 nfg++;
6238         }
6239
6240         while (len > 0) {
6241                 unsigned int this_len = len;
6242
6243                 if (this_len > MAX_TX_DESC_LEN)
6244                         this_len = MAX_TX_DESC_LEN;
6245
6246                 niu_set_txd(rp, prod, mapping, this_len, mrk, nfg);
6247                 mrk = nfg = 0;
6248
6249                 prod = NEXT_TX(rp, prod);
6250                 mapping += this_len;
6251                 len -= this_len;
6252         }
6253
6254         for (i = 0; i <  skb_shinfo(skb)->nr_frags; i++) {
6255                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6256
6257                 len = frag->size;
6258                 mapping = np->ops->map_page(np->device, frag->page,
6259                                             frag->page_offset, len,
6260                                             DMA_TO_DEVICE);
6261
6262                 rp->tx_buffs[prod].skb = NULL;
6263                 rp->tx_buffs[prod].mapping = mapping;
6264
6265                 niu_set_txd(rp, prod, mapping, len, 0, 0);
6266
6267                 prod = NEXT_TX(rp, prod);
6268         }
6269
6270         if (prod < rp->prod)
6271                 rp->wrap_bit ^= TX_RING_KICK_WRAP;
6272         rp->prod = prod;
6273
6274         nw64(TX_RING_KICK(rp->tx_channel), rp->wrap_bit | (prod << 3));
6275
6276         if (unlikely(niu_tx_avail(rp) <= (MAX_SKB_FRAGS + 1))) {
6277                 netif_tx_stop_queue(txq);
6278                 if (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp))
6279                         netif_tx_wake_queue(txq);
6280         }
6281
6282         dev->trans_start = jiffies;
6283
6284 out:
6285         return NETDEV_TX_OK;
6286
6287 out_drop:
6288         rp->tx_errors++;
6289         kfree_skb(skb);
6290         goto out;
6291 }
6292
/* net_device change_mtu hook.  A running interface is fully restarted
 * only when the change crosses the standard/jumbo boundary, since the
 * ring buffer sizing depends on it.
 *
 * NOTE(review): if niu_alloc_channels() or niu_init_hw() fails after
 * the shutdown, the device is left down with err returned -- verify
 * callers expect that.
 */
static int niu_change_mtu(struct net_device *dev, int new_mtu)
{
	struct niu *np = netdev_priv(dev);
	int err, orig_jumbo, new_jumbo;

	if (new_mtu < 68 || new_mtu > NIU_MAX_MTU)
		return -EINVAL;

	orig_jumbo = (dev->mtu > ETH_DATA_LEN);
	new_jumbo = (new_mtu > ETH_DATA_LEN);

	dev->mtu = new_mtu;

	/* No restart needed when down or staying on the same side of
	 * the jumbo boundary.
	 */
	if (!netif_running(dev) ||
	    (orig_jumbo == new_jumbo))
		return 0;

	niu_full_shutdown(np, dev);

	niu_free_channels(np);

	niu_enable_napi(np);

	err = niu_alloc_channels(np);
	if (err)
		return err;

	spin_lock_irq(&np->lock);

	err = niu_init_hw(np);
	if (!err) {
		init_timer(&np->timer);
		np->timer.expires = jiffies + HZ;
		np->timer.data = (unsigned long) np;
		np->timer.function = niu_timer;

		err = niu_enable_interrupts(np, 1);
		if (err)
			niu_stop_hw(np);
	}

	spin_unlock_irq(&np->lock);

	if (!err) {
		netif_tx_start_all_queues(dev);
		if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
			netif_carrier_on(dev);

		add_timer(&np->timer);
	}

	return err;
}
6346
/* ethtool get_drvinfo hook: report driver name/version, the fcode
 * version from the VPD, and -- for non-NIU (i.e. PCI) platforms --
 * the PCI bus address.  Presumably PLAT_TYPE_NIU devices have no
 * valid pdev to name; confirm against the probe path.
 */
static void niu_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	struct niu *np = netdev_priv(dev);
	struct niu_vpd *vpd = &np->vpd;

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	sprintf(info->fw_version, "%d.%d",
		vpd->fcode_major, vpd->fcode_minor);
	if (np->parent->plat_type != PLAT_TYPE_NIU)
		strcpy(info->bus_info, pci_name(np->pdev));
}
6360
6361 static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6362 {
6363         struct niu *np = netdev_priv(dev);
6364         struct niu_link_config *lp;
6365
6366         lp = &np->link_config;
6367
6368         memset(cmd, 0, sizeof(*cmd));
6369         cmd->phy_address = np->phy_addr;
6370         cmd->supported = lp->supported;
6371         cmd->advertising = lp->advertising;
6372         cmd->autoneg = lp->autoneg;
6373         cmd->speed = lp->active_speed;
6374         cmd->duplex = lp->active_duplex;
6375
6376         return 0;
6377 }
6378
6379 static int niu_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6380 {
6381         return -EINVAL;
6382 }
6383
6384 static u32 niu_get_msglevel(struct net_device *dev)
6385 {
6386         struct niu *np = netdev_priv(dev);
6387         return np->msg_enable;
6388 }
6389
6390 static void niu_set_msglevel(struct net_device *dev, u32 value)
6391 {
6392         struct niu *np = netdev_priv(dev);
6393         np->msg_enable = value;
6394 }
6395
6396 static int niu_get_eeprom_len(struct net_device *dev)
6397 {
6398         struct niu *np = netdev_priv(dev);
6399
6400         return np->eeprom_len;
6401 }
6402
/* ethtool get_eeprom: copy [eeprom->offset, eeprom->offset + eeprom->len)
 * of the EEPROM contents into 'data'.
 *
 * The EEPROM is exposed through the ESPC_NCR registers, one 32-bit
 * word per register index, so the copy is done in three phases:
 * an unaligned head (partial leading word), an aligned body (whole
 * words), and an unaligned tail (partial trailing word).
 *
 * Returns 0 on success, -EINVAL if the requested range wraps or
 * starts beyond the end of the EEPROM.  A range that merely runs
 * past the end is clamped (and eeprom->len updated to tell the
 * caller how much was actually read).
 */
static int niu_get_eeprom(struct net_device *dev,
			  struct ethtool_eeprom *eeprom, u8 *data)
{
	struct niu *np = netdev_priv(dev);
	u32 offset, len, val;

	offset = eeprom->offset;
	len = eeprom->len;

	/* Reject u32 overflow of offset + len (range wraps around). */
	if (offset + len < offset)
		return -EINVAL;
	if (offset >= np->eeprom_len)
		return -EINVAL;
	/* Clamp a request that extends past the end of the EEPROM. */
	if (offset + len > np->eeprom_len)
		len = eeprom->len = np->eeprom_len - offset;

	if (offset & 3) {
		u32 b_offset, b_count;

		/* Head: offset is not word-aligned.  Read the word
		 * containing 'offset' and copy just the trailing
		 * b_count bytes of it.
		 */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len)
			b_count = len;

		val = nr64(ESPC_NCR((offset - b_offset) / 4));
		memcpy(data, ((char *)&val) + b_offset, b_count);
		data += b_count;
		len -= b_count;
		offset += b_count;
	}
	/* Body: offset is now word-aligned; copy whole 32-bit words. */
	while (len >= 4) {
		val = nr64(ESPC_NCR(offset / 4));
		memcpy(data, &val, 4);
		data += 4;
		len -= 4;
		offset += 4;
	}
	/* Tail: fewer than 4 bytes remain; copy the leading bytes of
	 * the final word.
	 */
	if (len) {
		val = nr64(ESPC_NCR(offset / 4));
		memcpy(data, &val, len);
	}
	return 0;
}
6446
6447 static int niu_ethflow_to_class(int flow_type, u64 *class)
6448 {
6449         switch (flow_type) {
6450         case TCP_V4_FLOW:
6451                 *class = CLASS_CODE_TCP_IPV4;
6452                 break;
6453         case UDP_V4_FLOW:
6454                 *class = CLASS_CODE_UDP_IPV4;
6455                 break;
6456         case AH_ESP_V4_FLOW: