r8169: use device dependent methods to access the MII registers.
drivers/net/r8169.c
1 /*
2  * r8169.c: RealTek 8169/8168/8101 ethernet driver.
3  *
4  * Copyright (c) 2002 ShuChen <shuchen@realtek.com.tw>
5  * Copyright (c) 2003 - 2007 Francois Romieu <romieu@fr.zoreil.com>
6  * Copyright (c) a lot of people too. Please respect their work.
7  *
8  * See MAINTAINERS file for support contact information.
9  */
10
11 #include <linux/module.h>
12 #include <linux/moduleparam.h>
13 #include <linux/pci.h>
14 #include <linux/netdevice.h>
15 #include <linux/etherdevice.h>
16 #include <linux/delay.h>
17 #include <linux/ethtool.h>
18 #include <linux/mii.h>
19 #include <linux/if_vlan.h>
20 #include <linux/crc32.h>
21 #include <linux/in.h>
22 #include <linux/ip.h>
23 #include <linux/tcp.h>
24 #include <linux/init.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/pm_runtime.h>
27 #include <linux/firmware.h>
28
29 #include <asm/system.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32
33 #define RTL8169_VERSION "2.3LK-NAPI"
34 #define MODULENAME "r8169"
35 #define PFX MODULENAME ": "
36
37 #define FIRMWARE_8168D_1        "rtl_nic/rtl8168d-1.fw"
38 #define FIRMWARE_8168D_2        "rtl_nic/rtl8168d-2.fw"
39
40 #ifdef RTL8169_DEBUG
41 #define assert(expr) \
42         do { if (!(expr)) {                             \
43                 printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \
44                        #expr, __FILE__, __func__, __LINE__);    \
45         } } while (0)
46 #define dprintk(fmt, args...) \
47         do { printk(KERN_DEBUG PFX fmt, ## args); } while (0)
48 #else
49 #define assert(expr) do {} while (0)
50 #define dprintk(fmt, args...)   do {} while (0)
51 #endif /* RTL8169_DEBUG */
52
53 #define R8169_MSG_DEFAULT \
54         (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
55
56 #define TX_BUFFS_AVAIL(tp) \
57         (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1)
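/*
 * Worked example (illustrative only): with NUM_TX_DESC = 64, dirty_tx = 10
 * and cur_tx = 70, TX_BUFFS_AVAIL() yields 10 + 64 - 70 - 1 = 3, i.e. three
 * more descriptors may be queued.  The "- 1" keeps one descriptor in reserve
 * so the ring is never driven completely full.
 */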
58
59 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
60    The RTL chips use a 64 element hash table based on the Ethernet CRC. */
61 static const int multicast_filter_limit = 32;
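/*
 * Sketch of the hashing scheme described above (the actual filter setup is
 * done in rtl_set_rx_mode(), outside this excerpt): the top six bits of the
 * Ethernet CRC of each address select one of the 64 hash-table bits, roughly
 *
 *	int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
 *	mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
 */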
62
63 /* MAC address length */
64 #define MAC_ADDR_LEN    6
65
66 #define MAX_READ_REQUEST_SHIFT  12
67 #define RX_FIFO_THRESH  7       /* 7 means NO threshold, Rx buffer level before first PCI xfer. */
68 #define RX_DMA_BURST    6       /* Maximum PCI burst, '6' is 1024 */
69 #define TX_DMA_BURST    6       /* Maximum PCI burst, '6' is 1024 */
70 #define SafeMtu         0x1c20  /* ... actually life sucks beyond ~7k */
71 #define InterFrameGap   0x03    /* 3 means InterFrameGap = the shortest one */
72
73 #define R8169_REGS_SIZE         256
74 #define R8169_NAPI_WEIGHT       64
75 #define NUM_TX_DESC     64      /* Number of Tx descriptor registers */
76 #define NUM_RX_DESC     256     /* Number of Rx descriptor registers */
77 #define RX_BUF_SIZE     1536    /* Rx Buffer size */
78 #define R8169_TX_RING_BYTES     (NUM_TX_DESC * sizeof(struct TxDesc))
79 #define R8169_RX_RING_BYTES     (NUM_RX_DESC * sizeof(struct RxDesc))
80
81 #define RTL8169_TX_TIMEOUT      (6*HZ)
82 #define RTL8169_PHY_TIMEOUT     (10*HZ)
83
84 #define RTL_EEPROM_SIG          cpu_to_le32(0x8129)
85 #define RTL_EEPROM_SIG_MASK     cpu_to_le32(0xffff)
86 #define RTL_EEPROM_SIG_ADDR     0x0000
87
88 /* write/read MMIO register */
89 #define RTL_W8(reg, val8)       writeb ((val8), ioaddr + (reg))
90 #define RTL_W16(reg, val16)     writew ((val16), ioaddr + (reg))
91 #define RTL_W32(reg, val32)     writel ((val32), ioaddr + (reg))
92 #define RTL_R8(reg)             readb (ioaddr + (reg))
93 #define RTL_R16(reg)            readw (ioaddr + (reg))
94 #define RTL_R32(reg)            readl (ioaddr + (reg))
95
96 enum mac_version {
97         RTL_GIGA_MAC_NONE   = 0x00,
98         RTL_GIGA_MAC_VER_01 = 0x01, // 8169
99         RTL_GIGA_MAC_VER_02 = 0x02, // 8169S
100         RTL_GIGA_MAC_VER_03 = 0x03, // 8110S
101         RTL_GIGA_MAC_VER_04 = 0x04, // 8169SB
102         RTL_GIGA_MAC_VER_05 = 0x05, // 8110SCd
103         RTL_GIGA_MAC_VER_06 = 0x06, // 8110SCe
104         RTL_GIGA_MAC_VER_07 = 0x07, // 8102e
105         RTL_GIGA_MAC_VER_08 = 0x08, // 8102e
106         RTL_GIGA_MAC_VER_09 = 0x09, // 8102e
107         RTL_GIGA_MAC_VER_10 = 0x0a, // 8101e
108         RTL_GIGA_MAC_VER_11 = 0x0b, // 8168Bb
109         RTL_GIGA_MAC_VER_12 = 0x0c, // 8168Be
110         RTL_GIGA_MAC_VER_13 = 0x0d, // 8101Eb
111         RTL_GIGA_MAC_VER_14 = 0x0e, // 8101 ?
112         RTL_GIGA_MAC_VER_15 = 0x0f, // 8101 ?
113         RTL_GIGA_MAC_VER_16 = 0x11, // 8101Ec
114         RTL_GIGA_MAC_VER_17 = 0x10, // 8168Bf
115         RTL_GIGA_MAC_VER_18 = 0x12, // 8168CP
116         RTL_GIGA_MAC_VER_19 = 0x13, // 8168C
117         RTL_GIGA_MAC_VER_20 = 0x14, // 8168C
118         RTL_GIGA_MAC_VER_21 = 0x15, // 8168C
119         RTL_GIGA_MAC_VER_22 = 0x16, // 8168C
120         RTL_GIGA_MAC_VER_23 = 0x17, // 8168CP
121         RTL_GIGA_MAC_VER_24 = 0x18, // 8168CP
122         RTL_GIGA_MAC_VER_25 = 0x19, // 8168D
123         RTL_GIGA_MAC_VER_26 = 0x1a, // 8168D
124         RTL_GIGA_MAC_VER_27 = 0x1b  // 8168DP
125 };
126
127 #define _R(NAME,MAC,MASK) \
128         { .name = NAME, .mac_version = MAC, .RxConfigMask = MASK }
129
130 static const struct {
131         const char *name;
132         u8 mac_version;
133         u32 RxConfigMask;       /* Clears the bits supported by this chip */
134 } rtl_chip_info[] = {
135         _R("RTL8169",           RTL_GIGA_MAC_VER_01, 0xff7e1880), // 8169
136         _R("RTL8169s",          RTL_GIGA_MAC_VER_02, 0xff7e1880), // 8169S
137         _R("RTL8110s",          RTL_GIGA_MAC_VER_03, 0xff7e1880), // 8110S
138         _R("RTL8169sb/8110sb",  RTL_GIGA_MAC_VER_04, 0xff7e1880), // 8169SB
139         _R("RTL8169sc/8110sc",  RTL_GIGA_MAC_VER_05, 0xff7e1880), // 8110SCd
140         _R("RTL8169sc/8110sc",  RTL_GIGA_MAC_VER_06, 0xff7e1880), // 8110SCe
141         _R("RTL8102e",          RTL_GIGA_MAC_VER_07, 0xff7e1880), // PCI-E
142         _R("RTL8102e",          RTL_GIGA_MAC_VER_08, 0xff7e1880), // PCI-E
143         _R("RTL8102e",          RTL_GIGA_MAC_VER_09, 0xff7e1880), // PCI-E
144         _R("RTL8101e",          RTL_GIGA_MAC_VER_10, 0xff7e1880), // PCI-E
145         _R("RTL8168b/8111b",    RTL_GIGA_MAC_VER_11, 0xff7e1880), // PCI-E
146         _R("RTL8168b/8111b",    RTL_GIGA_MAC_VER_12, 0xff7e1880), // PCI-E
147         _R("RTL8101e",          RTL_GIGA_MAC_VER_13, 0xff7e1880), // PCI-E 8139
148         _R("RTL8100e",          RTL_GIGA_MAC_VER_14, 0xff7e1880), // PCI-E 8139
149         _R("RTL8100e",          RTL_GIGA_MAC_VER_15, 0xff7e1880), // PCI-E 8139
150         _R("RTL8168b/8111b",    RTL_GIGA_MAC_VER_17, 0xff7e1880), // PCI-E
151         _R("RTL8101e",          RTL_GIGA_MAC_VER_16, 0xff7e1880), // PCI-E
152         _R("RTL8168cp/8111cp",  RTL_GIGA_MAC_VER_18, 0xff7e1880), // PCI-E
153         _R("RTL8168c/8111c",    RTL_GIGA_MAC_VER_19, 0xff7e1880), // PCI-E
154         _R("RTL8168c/8111c",    RTL_GIGA_MAC_VER_20, 0xff7e1880), // PCI-E
155         _R("RTL8168c/8111c",    RTL_GIGA_MAC_VER_21, 0xff7e1880), // PCI-E
156         _R("RTL8168c/8111c",    RTL_GIGA_MAC_VER_22, 0xff7e1880), // PCI-E
157         _R("RTL8168cp/8111cp",  RTL_GIGA_MAC_VER_23, 0xff7e1880), // PCI-E
158         _R("RTL8168cp/8111cp",  RTL_GIGA_MAC_VER_24, 0xff7e1880), // PCI-E
159         _R("RTL8168d/8111d",    RTL_GIGA_MAC_VER_25, 0xff7e1880), // PCI-E
160         _R("RTL8168d/8111d",    RTL_GIGA_MAC_VER_26, 0xff7e1880), // PCI-E
161         _R("RTL8168dp/8111dp",  RTL_GIGA_MAC_VER_27, 0xff7e1880)  // PCI-E
162 };
163 #undef _R
164
165 enum cfg_version {
166         RTL_CFG_0 = 0x00,
167         RTL_CFG_1,
168         RTL_CFG_2
169 };
170
171 static void rtl_hw_start_8169(struct net_device *);
172 static void rtl_hw_start_8168(struct net_device *);
173 static void rtl_hw_start_8101(struct net_device *);
174
175 static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
176         { PCI_DEVICE(PCI_VENDOR_ID_REALTEK,     0x8129), 0, 0, RTL_CFG_0 },
177         { PCI_DEVICE(PCI_VENDOR_ID_REALTEK,     0x8136), 0, 0, RTL_CFG_2 },
178         { PCI_DEVICE(PCI_VENDOR_ID_REALTEK,     0x8167), 0, 0, RTL_CFG_0 },
179         { PCI_DEVICE(PCI_VENDOR_ID_REALTEK,     0x8168), 0, 0, RTL_CFG_1 },
180         { PCI_DEVICE(PCI_VENDOR_ID_REALTEK,     0x8169), 0, 0, RTL_CFG_0 },
181         { PCI_DEVICE(PCI_VENDOR_ID_DLINK,       0x4300), 0, 0, RTL_CFG_0 },
182         { PCI_DEVICE(PCI_VENDOR_ID_AT,          0xc107), 0, 0, RTL_CFG_0 },
183         { PCI_DEVICE(0x16ec,                    0x0116), 0, 0, RTL_CFG_0 },
184         { PCI_VENDOR_ID_LINKSYS,                0x1032,
185                 PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 },
186         { 0x0001,                               0x8168,
187                 PCI_ANY_ID, 0x2410, 0, 0, RTL_CFG_2 },
188         {0,},
189 };
190
191 MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
192
193 static int rx_buf_sz = 16383;
194 static int use_dac;
195 static struct {
196         u32 msg_enable;
197 } debug = { -1 };
198
199 enum rtl_registers {
200         MAC0            = 0,    /* Ethernet hardware address. */
201         MAC4            = 4,
202         MAR0            = 8,    /* Multicast filter. */
203         CounterAddrLow          = 0x10,
204         CounterAddrHigh         = 0x14,
205         TxDescStartAddrLow      = 0x20,
206         TxDescStartAddrHigh     = 0x24,
207         TxHDescStartAddrLow     = 0x28,
208         TxHDescStartAddrHigh    = 0x2c,
209         FLASH           = 0x30,
210         ERSR            = 0x36,
211         ChipCmd         = 0x37,
212         TxPoll          = 0x38,
213         IntrMask        = 0x3c,
214         IntrStatus      = 0x3e,
215         TxConfig        = 0x40,
216         RxConfig        = 0x44,
217         RxMissed        = 0x4c,
218         Cfg9346         = 0x50,
219         Config0         = 0x51,
220         Config1         = 0x52,
221         Config2         = 0x53,
222         Config3         = 0x54,
223         Config4         = 0x55,
224         Config5         = 0x56,
225         MultiIntr       = 0x5c,
226         PHYAR           = 0x60,
227         PHYstatus       = 0x6c,
228         RxMaxSize       = 0xda,
229         CPlusCmd        = 0xe0,
230         IntrMitigate    = 0xe2,
231         RxDescAddrLow   = 0xe4,
232         RxDescAddrHigh  = 0xe8,
233         EarlyTxThres    = 0xec, /* 8169. Unit of 32 bytes. */
234
235 #define NoEarlyTx       0x3f    /* Max value : no early transmit. */
236
237         MaxTxPacketSize = 0xec, /* 8101/8168. Unit of 128 bytes. */
238
239 #define TxPacketMax     (8064 >> 7)
240
241         FuncEvent       = 0xf0,
242         FuncEventMask   = 0xf4,
243         FuncPresetState = 0xf8,
244         FuncForceEvent  = 0xfc,
245 };
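/*
 * Illustrative sketch, not part of the original driver: the RTL_W8/16/32 and
 * RTL_R8/16/32 macros expect a local "ioaddr" naming the mapped register
 * window, so a typical access pattern looks like this hypothetical helper,
 * which copies the station address out of the MAC0..MAC0+5 byte registers.
 */
static inline void rtl_example_read_mac(void __iomem *ioaddr, u8 *mac)
{
	int i;

	for (i = 0; i < MAC_ADDR_LEN; i++)
		mac[i] = RTL_R8(MAC0 + i);	/* one byte per MMIO read */
}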
246
247 enum rtl8110_registers {
248         TBICSR                  = 0x64,
249         TBI_ANAR                = 0x68,
250         TBI_LPAR                = 0x6a,
251 };
252
253 enum rtl8168_8101_registers {
254         CSIDR                   = 0x64,
255         CSIAR                   = 0x68,
256 #define CSIAR_FLAG                      0x80000000
257 #define CSIAR_WRITE_CMD                 0x80000000
258 #define CSIAR_BYTE_ENABLE               0x0f
259 #define CSIAR_BYTE_ENABLE_SHIFT         12
260 #define CSIAR_ADDR_MASK                 0x0fff
261
262         EPHYAR                  = 0x80,
263 #define EPHYAR_FLAG                     0x80000000
264 #define EPHYAR_WRITE_CMD                0x80000000
265 #define EPHYAR_REG_MASK                 0x1f
266 #define EPHYAR_REG_SHIFT                16
267 #define EPHYAR_DATA_MASK                0xffff
268         DBG_REG                 = 0xd1,
269 #define FIX_NAK_1                       (1 << 4)
270 #define FIX_NAK_2                       (1 << 3)
271         EFUSEAR                 = 0xdc,
272 #define EFUSEAR_FLAG                    0x80000000
273 #define EFUSEAR_WRITE_CMD               0x80000000
274 #define EFUSEAR_READ_CMD                0x00000000
275 #define EFUSEAR_REG_MASK                0x03ff
276 #define EFUSEAR_REG_SHIFT               8
277 #define EFUSEAR_DATA_MASK               0xff
278 };
279
280 enum rtl_register_content {
281         /* InterruptStatusBits */
282         SYSErr          = 0x8000,
283         PCSTimeout      = 0x4000,
284         SWInt           = 0x0100,
285         TxDescUnavail   = 0x0080,
286         RxFIFOOver      = 0x0040,
287         LinkChg         = 0x0020,
288         RxOverflow      = 0x0010,
289         TxErr           = 0x0008,
290         TxOK            = 0x0004,
291         RxErr           = 0x0002,
292         RxOK            = 0x0001,
293
294         /* RxStatusDesc */
295         RxFOVF  = (1 << 23),
296         RxRWT   = (1 << 22),
297         RxRES   = (1 << 21),
298         RxRUNT  = (1 << 20),
299         RxCRC   = (1 << 19),
300
301         /* ChipCmdBits */
302         CmdReset        = 0x10,
303         CmdRxEnb        = 0x08,
304         CmdTxEnb        = 0x04,
305         RxBufEmpty      = 0x01,
306
307         /* TXPoll register p.5 */
308         HPQ             = 0x80,         /* Poll cmd on the high prio queue */
309         NPQ             = 0x40,         /* Poll cmd on the low prio queue */
310         FSWInt          = 0x01,         /* Forced software interrupt */
311
312         /* Cfg9346Bits */
313         Cfg9346_Lock    = 0x00,
314         Cfg9346_Unlock  = 0xc0,
315
316         /* rx_mode_bits */
317         AcceptErr       = 0x20,
318         AcceptRunt      = 0x10,
319         AcceptBroadcast = 0x08,
320         AcceptMulticast = 0x04,
321         AcceptMyPhys    = 0x02,
322         AcceptAllPhys   = 0x01,
323
324         /* RxConfigBits */
325         RxCfgFIFOShift  = 13,
326         RxCfgDMAShift   =  8,
327
328         /* TxConfigBits */
329         TxInterFrameGapShift = 24,
330         TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
331
332         /* Config1 register p.24 */
333         LEDS1           = (1 << 7),
334         LEDS0           = (1 << 6),
335         MSIEnable       = (1 << 5),     /* Enable Message Signaled Interrupt */
336         Speed_down      = (1 << 4),
337         MEMMAP          = (1 << 3),
338         IOMAP           = (1 << 2),
339         VPD             = (1 << 1),
340         PMEnable        = (1 << 0),     /* Power Management Enable */
341
342         /* Config2 register p. 25 */
343         PCI_Clock_66MHz = 0x01,
344         PCI_Clock_33MHz = 0x00,
345
346         /* Config3 register p.25 */
347         MagicPacket     = (1 << 5),     /* Wake up when receives a Magic Packet */
348         LinkUp          = (1 << 4),     /* Wake up when the cable connection is re-established */
349         Beacon_en       = (1 << 0),     /* 8168 only. Reserved in the 8168b */
350
351         /* Config5 register p.27 */
352         BWF             = (1 << 6),     /* Accept Broadcast wakeup frame */
353         MWF             = (1 << 5),     /* Accept Multicast wakeup frame */
354         UWF             = (1 << 4),     /* Accept Unicast wakeup frame */
355         LanWake         = (1 << 1),     /* LanWake enable/disable */
356         PMEStatus       = (1 << 0),     /* PME status can be reset by PCI RST# */
357
358         /* TBICSR p.28 */
359         TBIReset        = 0x80000000,
360         TBILoopback     = 0x40000000,
361         TBINwEnable     = 0x20000000,
362         TBINwRestart    = 0x10000000,
363         TBILinkOk       = 0x02000000,
364         TBINwComplete   = 0x01000000,
365
366         /* CPlusCmd p.31 */
367         EnableBist      = (1 << 15),    // 8168 8101
368         Mac_dbgo_oe     = (1 << 14),    // 8168 8101
369         Normal_mode     = (1 << 13),    // unused
370         Force_half_dup  = (1 << 12),    // 8168 8101
371         Force_rxflow_en = (1 << 11),    // 8168 8101
372         Force_txflow_en = (1 << 10),    // 8168 8101
373         Cxpl_dbg_sel    = (1 << 9),     // 8168 8101
374         ASF             = (1 << 8),     // 8168 8101
375         PktCntrDisable  = (1 << 7),     // 8168 8101
376         Mac_dbgo_sel    = 0x001c,       // 8168
377         RxVlan          = (1 << 6),
378         RxChkSum        = (1 << 5),
379         PCIDAC          = (1 << 4),
380         PCIMulRW        = (1 << 3),
381         INTT_0          = 0x0000,       // 8168
382         INTT_1          = 0x0001,       // 8168
383         INTT_2          = 0x0002,       // 8168
384         INTT_3          = 0x0003,       // 8168
385
386         /* rtl8169_PHYstatus */
387         TBI_Enable      = 0x80,
388         TxFlowCtrl      = 0x40,
389         RxFlowCtrl      = 0x20,
390         _1000bpsF       = 0x10,
391         _100bps         = 0x08,
392         _10bps          = 0x04,
393         LinkStatus      = 0x02,
394         FullDup         = 0x01,
395
396         /* _TBICSRBit */
397         TBILinkOK       = 0x02000000,
398
399         /* DumpCounterCommand */
400         CounterDump     = 0x8,
401 };
402
403 enum desc_status_bit {
404         DescOwn         = (1 << 31), /* Descriptor is owned by NIC */
405         RingEnd         = (1 << 30), /* End of descriptor ring */
406         FirstFrag       = (1 << 29), /* First segment of a packet */
407         LastFrag        = (1 << 28), /* Final segment of a packet */
408
409         /* Tx private */
410         LargeSend       = (1 << 27), /* TCP Large Send Offload (TSO) */
411         MSSShift        = 16,        /* MSS value position */
412         MSSMask         = 0xfff,     /* MSS value + LargeSend bit: 12 bits */
413         IPCS            = (1 << 18), /* Calculate IP checksum */
414         UDPCS           = (1 << 17), /* Calculate UDP/IP checksum */
415         TCPCS           = (1 << 16), /* Calculate TCP/IP checksum */
416         TxVlanTag       = (1 << 17), /* Add VLAN tag */
417
418         /* Rx private */
419         PID1            = (1 << 18), /* Protocol ID bit 1/2 */
420         PID0            = (1 << 17), /* Protocol ID bit 2/2 */
421
422 #define RxProtoUDP      (PID1)
423 #define RxProtoTCP      (PID0)
424 #define RxProtoIP       (PID1 | PID0)
425 #define RxProtoMask     RxProtoIP
426
427         IPFail          = (1 << 16), /* IP checksum failed */
428         UDPFail         = (1 << 15), /* UDP/IP checksum failed */
429         TCPFail         = (1 << 14), /* TCP/IP checksum failed */
430         RxVlanTag       = (1 << 16), /* VLAN tag available */
431 };
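/*
 * Illustrative example only: for a single-fragment, 60 byte transmit with
 * TCP checksum offload, opts1 (before the cpu_to_le32 conversion done by the
 * xmit path) would look like
 *
 *	DescOwn | FirstFrag | LastFrag | IPCS | TCPCS | 60
 *
 * with the fragment length in the low 16 bits, while a TSO frame would carry
 * LargeSend | (mss << MSSShift) instead of the checksum bits.
 */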
432
433 #define RsvdMask        0x3fffc000
434
435 struct TxDesc {
436         __le32 opts1;
437         __le32 opts2;
438         __le64 addr;
439 };
440
441 struct RxDesc {
442         __le32 opts1;
443         __le32 opts2;
444         __le64 addr;
445 };
446
447 struct ring_info {
448         struct sk_buff  *skb;
449         u32             len;
450         u8              __pad[sizeof(void *) - sizeof(u32)];
451 };
452
453 enum features {
454         RTL_FEATURE_WOL         = (1 << 0),
455         RTL_FEATURE_MSI         = (1 << 1),
456         RTL_FEATURE_GMII        = (1 << 2),
457 };
458
459 struct rtl8169_counters {
460         __le64  tx_packets;
461         __le64  rx_packets;
462         __le64  tx_errors;
463         __le32  rx_errors;
464         __le16  rx_missed;
465         __le16  align_errors;
466         __le32  tx_one_collision;
467         __le32  tx_multi_collision;
468         __le64  rx_unicast;
469         __le64  rx_broadcast;
470         __le32  rx_multicast;
471         __le16  tx_aborted;
472         __le16  tx_underun;
473 };
474
475 struct rtl8169_private {
476         void __iomem *mmio_addr;        /* memory mapped I/O base address */
477         struct pci_dev *pci_dev;        /* PCI device */
478         struct net_device *dev;
479         struct napi_struct napi;
480         spinlock_t lock;                /* serializes access to chip and driver state */
481         u32 msg_enable;
482         int chipset;
483         int mac_version;
484         u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
485         u32 cur_tx; /* Index into the Tx descriptor buffer of next Tx pkt. */
486         u32 dirty_rx;
487         u32 dirty_tx;
488         struct TxDesc *TxDescArray;     /* 256-aligned Tx descriptor ring */
489         struct RxDesc *RxDescArray;     /* 256-aligned Rx descriptor ring */
490         dma_addr_t TxPhyAddr;
491         dma_addr_t RxPhyAddr;
492         void *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */
493         struct ring_info tx_skb[NUM_TX_DESC];   /* Tx data buffers */
494         struct timer_list timer;
495         u16 cp_cmd;
496         u16 intr_event;
497         u16 napi_event;
498         u16 intr_mask;
499         int phy_1000_ctrl_reg;
500 #ifdef CONFIG_R8169_VLAN
501         struct vlan_group *vlgrp;
502 #endif
503         int (*set_speed)(struct net_device *, u8 autoneg, u16 speed, u8 duplex);
504         int (*get_settings)(struct net_device *, struct ethtool_cmd *);
505         void (*phy_reset_enable)(struct rtl8169_private *tp);
506         void (*hw_start)(struct net_device *);
507         unsigned int (*phy_reset_pending)(struct rtl8169_private *tp);
508         unsigned int (*link_ok)(void __iomem *);
509         int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd);
510         int pcie_cap;
511         struct delayed_work task;
512         unsigned features;
513
514         struct mii_if_info mii;
515         struct rtl8169_counters counters;
516         u32 saved_wolopts;
517 };
518
519 MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
520 MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
521 module_param(use_dac, int, 0);
522 MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
523 module_param_named(debug, debug.msg_enable, int, 0);
524 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
525 MODULE_LICENSE("GPL");
526 MODULE_VERSION(RTL8169_VERSION);
527 MODULE_FIRMWARE(FIRMWARE_8168D_1);
528 MODULE_FIRMWARE(FIRMWARE_8168D_2);
529
530 static int rtl8169_open(struct net_device *dev);
531 static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
532                                       struct net_device *dev);
533 static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance);
534 static int rtl8169_init_ring(struct net_device *dev);
535 static void rtl_hw_start(struct net_device *dev);
536 static int rtl8169_close(struct net_device *dev);
537 static void rtl_set_rx_mode(struct net_device *dev);
538 static void rtl8169_tx_timeout(struct net_device *dev);
539 static struct net_device_stats *rtl8169_get_stats(struct net_device *dev);
540 static int rtl8169_rx_interrupt(struct net_device *, struct rtl8169_private *,
541                                 void __iomem *, u32 budget);
542 static int rtl8169_change_mtu(struct net_device *dev, int new_mtu);
543 static void rtl8169_down(struct net_device *dev);
544 static void rtl8169_rx_clear(struct rtl8169_private *tp);
545 static int rtl8169_poll(struct napi_struct *napi, int budget);
546
547 static const unsigned int rtl8169_rx_config =
548         (RX_FIFO_THRESH << RxCfgFIFOShift) | (RX_DMA_BURST << RxCfgDMAShift);
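/*
 * With the values defined above this evaluates to (7 << 13) | (6 << 8) =
 * 0xe600: no Rx FIFO threshold and a 1024 byte maximum DMA burst.
 */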
549
550 static void r8169_mdio_write(void __iomem *ioaddr, int reg_addr, int value)
551 {
552         int i;
553
554         RTL_W32(PHYAR, 0x80000000 | (reg_addr & 0x1f) << 16 | (value & 0xffff));
555
556         for (i = 20; i > 0; i--) {
557                 /*
558                  * Check if the RTL8169 has completed writing to the specified
559                  * MII register.
560                  */
561                 if (!(RTL_R32(PHYAR) & 0x80000000))
562                         break;
563                 udelay(25);
564         }
565         /*
566          * According to hardware specs a 20us delay is required after write
567          * complete indication, but before sending next command.
568          */
569         udelay(20);
570 }
571
572 static int r8169_mdio_read(void __iomem *ioaddr, int reg_addr)
573 {
574         int i, value = -1;
575
576         RTL_W32(PHYAR, 0x0 | (reg_addr & 0x1f) << 16);
577
578         for (i = 20; i > 0; i--) {
579                 /*
580                  * Check if the RTL8169 has completed retrieving data from
581                  * the specified MII register.
582                  */
583                 if (RTL_R32(PHYAR) & 0x80000000) {
584                         value = RTL_R32(PHYAR) & 0xffff;
585                         break;
586                 }
587                 udelay(25);
588         }
589         /*
590          * According to hardware specs a 20us delay is required after read
591          * complete indication, but before sending next command.
592          */
593         udelay(20);
594
595         return value;
596 }
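/*
 * PHYAR layout as implemented by the two accessors above:
 *
 *	bit  31      command/completion flag: set it to start a write and
 *	             poll for it to clear; leave it clear to start a read
 *	             and poll for the chip to set it when data is valid
 *	bits 20:16   MII register address (0x00 - 0x1f)
 *	bits 15:0    data to write, or data read back
 *
 * e.g. r8169_mdio_write(ioaddr, MII_BMCR, BMCR_RESET) programs PHYAR with
 * 0x80000000 | (0x00 << 16) | 0x8000 and then polls bit 31 until it clears.
 */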
597
598 static void rtl_writephy(struct rtl8169_private *tp, int location, u32 val)
599 {
600         r8169_mdio_write(tp->mmio_addr, location, val);
601 }
602
603 static int rtl_readphy(struct rtl8169_private *tp, int location)
604 {
605         return r8169_mdio_read(tp->mmio_addr, location);
606 }
607
608 static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value)
609 {
610         rtl_writephy(tp, reg_addr, rtl_readphy(tp, reg_addr) | value);
611 }
612
613 static void rtl_w1w0_phy(struct rtl8169_private *tp, int reg_addr, int p, int m)
614 {
615         int val;
616
617         val = rtl_readphy(tp, reg_addr);
618         rtl_writephy(tp, reg_addr, (val | p) & ~m);
619 }
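/*
 * Example with hypothetical register values: rtl_patchphy(tp, 0x0d, 0x0010)
 * sets bit 4 of PHY register 0x0d and leaves the other bits alone, while
 * rtl_w1w0_phy(tp, 0x0d, 0x0010, 0x0008) sets bit 4 and clears bit 3 in one
 * read-modify-write cycle ("w1" = bits to set, "w0" = bits to clear).
 */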
620
621 static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
622                            int val)
623 {
624         struct rtl8169_private *tp = netdev_priv(dev);
625
626         rtl_writephy(tp, location, val);
627 }
628
629 static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
630 {
631         struct rtl8169_private *tp = netdev_priv(dev);
632
633         return rtl_readphy(tp, location);
634 }
635
636 static void rtl_ephy_write(void __iomem *ioaddr, int reg_addr, int value)
637 {
638         unsigned int i;
639
640         RTL_W32(EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) |
641                 (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
642
643         for (i = 0; i < 100; i++) {
644                 if (!(RTL_R32(EPHYAR) & EPHYAR_FLAG))
645                         break;
646                 udelay(10);
647         }
648 }
649
650 static u16 rtl_ephy_read(void __iomem *ioaddr, int reg_addr)
651 {
652         u16 value = 0xffff;
653         unsigned int i;
654
655         RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
656
657         for (i = 0; i < 100; i++) {
658                 if (RTL_R32(EPHYAR) & EPHYAR_FLAG) {
659                         value = RTL_R32(EPHYAR) & EPHYAR_DATA_MASK;
660                         break;
661                 }
662                 udelay(10);
663         }
664
665         return value;
666 }
667
668 static void rtl_csi_write(void __iomem *ioaddr, int addr, int value)
669 {
670         unsigned int i;
671
672         RTL_W32(CSIDR, value);
673         RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
674                 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
675
676         for (i = 0; i < 100; i++) {
677                 if (!(RTL_R32(CSIAR) & CSIAR_FLAG))
678                         break;
679                 udelay(10);
680         }
681 }
682
683 static u32 rtl_csi_read(void __iomem *ioaddr, int addr)
684 {
685         u32 value = ~0x00;
686         unsigned int i;
687
688         RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
689                 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
690
691         for (i = 0; i < 100; i++) {
692                 if (RTL_R32(CSIAR) & CSIAR_FLAG) {
693                         value = RTL_R32(CSIDR);
694                         break;
695                 }
696                 udelay(10);
697         }
698
699         return value;
700 }
701
702 static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr)
703 {
704         u8 value = 0xff;
705         unsigned int i;
706
707         RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);
708
709         for (i = 0; i < 300; i++) {
710                 if (RTL_R32(EFUSEAR) & EFUSEAR_FLAG) {
711                         value = RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK;
712                         break;
713                 }
714                 udelay(100);
715         }
716
717         return value;
718 }
719
720 static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr)
721 {
722         RTL_W16(IntrMask, 0x0000);
723
724         RTL_W16(IntrStatus, 0xffff);
725 }
726
727 static void rtl8169_asic_down(void __iomem *ioaddr)
728 {
729         RTL_W8(ChipCmd, 0x00);
730         rtl8169_irq_mask_and_ack(ioaddr);
731         RTL_R16(CPlusCmd);
732 }
733
734 static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp)
735 {
736         void __iomem *ioaddr = tp->mmio_addr;
737
738         return RTL_R32(TBICSR) & TBIReset;
739 }
740
741 static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private *tp)
742 {
743         return rtl_readphy(tp, MII_BMCR) & BMCR_RESET;
744 }
745
746 static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr)
747 {
748         return RTL_R32(TBICSR) & TBILinkOk;
749 }
750
751 static unsigned int rtl8169_xmii_link_ok(void __iomem *ioaddr)
752 {
753         return RTL_R8(PHYstatus) & LinkStatus;
754 }
755
756 static void rtl8169_tbi_reset_enable(struct rtl8169_private *tp)
757 {
758         void __iomem *ioaddr = tp->mmio_addr;
759
760         RTL_W32(TBICSR, RTL_R32(TBICSR) | TBIReset);
761 }
762
763 static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp)
764 {
765         unsigned int val;
766
767         val = rtl_readphy(tp, MII_BMCR) | BMCR_RESET;
768         rtl_writephy(tp, MII_BMCR, val & 0xffff);
769 }
770
771 static void __rtl8169_check_link_status(struct net_device *dev,
772                                       struct rtl8169_private *tp,
773                                       void __iomem *ioaddr,
774                                       bool pm)
775 {
776         unsigned long flags;
777
778         spin_lock_irqsave(&tp->lock, flags);
779         if (tp->link_ok(ioaddr)) {
780                 /* This is to cancel a scheduled suspend if there's one. */
781                 if (pm)
782                         pm_request_resume(&tp->pci_dev->dev);
783                 netif_carrier_on(dev);
784                 netif_info(tp, ifup, dev, "link up\n");
785         } else {
786                 netif_carrier_off(dev);
787                 netif_info(tp, ifdown, dev, "link down\n");
788                 if (pm)
789                         pm_schedule_suspend(&tp->pci_dev->dev, 100);
790         }
791         spin_unlock_irqrestore(&tp->lock, flags);
792 }
793
794 static void rtl8169_check_link_status(struct net_device *dev,
795                                       struct rtl8169_private *tp,
796                                       void __iomem *ioaddr)
797 {
798         __rtl8169_check_link_status(dev, tp, ioaddr, false);
799 }
800
801 #define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
802
803 static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
804 {
805         void __iomem *ioaddr = tp->mmio_addr;
806         u8 options;
807         u32 wolopts = 0;
808
809         options = RTL_R8(Config1);
810         if (!(options & PMEnable))
811                 return 0;
812
813         options = RTL_R8(Config3);
814         if (options & LinkUp)
815                 wolopts |= WAKE_PHY;
816         if (options & MagicPacket)
817                 wolopts |= WAKE_MAGIC;
818
819         options = RTL_R8(Config5);
820         if (options & UWF)
821                 wolopts |= WAKE_UCAST;
822         if (options & BWF)
823                 wolopts |= WAKE_BCAST;
824         if (options & MWF)
825                 wolopts |= WAKE_MCAST;
826
827         return wolopts;
828 }
829
830 static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
831 {
832         struct rtl8169_private *tp = netdev_priv(dev);
833
834         spin_lock_irq(&tp->lock);
835
836         wol->supported = WAKE_ANY;
837         wol->wolopts = __rtl8169_get_wol(tp);
838
839         spin_unlock_irq(&tp->lock);
840 }
841
842 static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
843 {
844         void __iomem *ioaddr = tp->mmio_addr;
845         unsigned int i;
846         static const struct {
847                 u32 opt;
848                 u16 reg;
849                 u8  mask;
850         } cfg[] = {
851                 { WAKE_ANY,   Config1, PMEnable },
852                 { WAKE_PHY,   Config3, LinkUp },
853                 { WAKE_MAGIC, Config3, MagicPacket },
854                 { WAKE_UCAST, Config5, UWF },
855                 { WAKE_BCAST, Config5, BWF },
856                 { WAKE_MCAST, Config5, MWF },
857                 { WAKE_ANY,   Config5, LanWake }
858         };
859
860         RTL_W8(Cfg9346, Cfg9346_Unlock);
861
862         for (i = 0; i < ARRAY_SIZE(cfg); i++) {
863                 u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
864                 if (wolopts & cfg[i].opt)
865                         options |= cfg[i].mask;
866                 RTL_W8(cfg[i].reg, options);
867         }
868
869         RTL_W8(Cfg9346, Cfg9346_Lock);
870 }
871
872 static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
873 {
874         struct rtl8169_private *tp = netdev_priv(dev);
875
876         spin_lock_irq(&tp->lock);
877
878         if (wol->wolopts)
879                 tp->features |= RTL_FEATURE_WOL;
880         else
881                 tp->features &= ~RTL_FEATURE_WOL;
882         __rtl8169_set_wol(tp, wol->wolopts);
883         spin_unlock_irq(&tp->lock);
884
885         device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
886
887         return 0;
888 }
889
890 static void rtl8169_get_drvinfo(struct net_device *dev,
891                                 struct ethtool_drvinfo *info)
892 {
893         struct rtl8169_private *tp = netdev_priv(dev);
894
895         strcpy(info->driver, MODULENAME);
896         strcpy(info->version, RTL8169_VERSION);
897         strcpy(info->bus_info, pci_name(tp->pci_dev));
898 }
899
900 static int rtl8169_get_regs_len(struct net_device *dev)
901 {
902         return R8169_REGS_SIZE;
903 }
904
905 static int rtl8169_set_speed_tbi(struct net_device *dev,
906                                  u8 autoneg, u16 speed, u8 duplex)
907 {
908         struct rtl8169_private *tp = netdev_priv(dev);
909         void __iomem *ioaddr = tp->mmio_addr;
910         int ret = 0;
911         u32 reg;
912
913         reg = RTL_R32(TBICSR);
914         if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) &&
915             (duplex == DUPLEX_FULL)) {
916                 RTL_W32(TBICSR, reg & ~(TBINwEnable | TBINwRestart));
917         } else if (autoneg == AUTONEG_ENABLE)
918                 RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart);
919         else {
920                 netif_warn(tp, link, dev,
921                            "incorrect speed setting refused in TBI mode\n");
922                 ret = -EOPNOTSUPP;
923         }
924
925         return ret;
926 }
927
928 static int rtl8169_set_speed_xmii(struct net_device *dev,
929                                   u8 autoneg, u16 speed, u8 duplex)
930 {
931         struct rtl8169_private *tp = netdev_priv(dev);
932         int giga_ctrl, bmcr;
933
934         if (autoneg == AUTONEG_ENABLE) {
935                 int auto_nego;
936
937                 auto_nego = rtl_readphy(tp, MII_ADVERTISE);
938                 auto_nego |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
939                               ADVERTISE_100HALF | ADVERTISE_100FULL);
940                 auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
941
942                 giga_ctrl = rtl_readphy(tp, MII_CTRL1000);
943                 giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
944
945                 /* The 8100e/8101e/8102e do Fast Ethernet only. */
946                 if ((tp->mac_version != RTL_GIGA_MAC_VER_07) &&
947                     (tp->mac_version != RTL_GIGA_MAC_VER_08) &&
948                     (tp->mac_version != RTL_GIGA_MAC_VER_09) &&
949                     (tp->mac_version != RTL_GIGA_MAC_VER_10) &&
950                     (tp->mac_version != RTL_GIGA_MAC_VER_13) &&
951                     (tp->mac_version != RTL_GIGA_MAC_VER_14) &&
952                     (tp->mac_version != RTL_GIGA_MAC_VER_15) &&
953                     (tp->mac_version != RTL_GIGA_MAC_VER_16)) {
954                         giga_ctrl |= ADVERTISE_1000FULL | ADVERTISE_1000HALF;
955                 } else {
956                         netif_info(tp, link, dev,
957                                    "PHY does not support 1000Mbps\n");
958                 }
959
960                 bmcr = BMCR_ANENABLE | BMCR_ANRESTART;
961
962                 if ((tp->mac_version == RTL_GIGA_MAC_VER_11) ||
963                     (tp->mac_version == RTL_GIGA_MAC_VER_12) ||
964                     (tp->mac_version >= RTL_GIGA_MAC_VER_17)) {
965                         /*
966                          * Wake up the PHY.
967                          * Vendor specific (0x1f) and reserved (0x0e) MII
968                          * registers.
969                          */
970                         rtl_writephy(tp, 0x1f, 0x0000);
971                         rtl_writephy(tp, 0x0e, 0x0000);
972                 }
973
974                 rtl_writephy(tp, MII_ADVERTISE, auto_nego);
975                 rtl_writephy(tp, MII_CTRL1000, giga_ctrl);
976         } else {
977                 giga_ctrl = 0;
978
979                 if (speed == SPEED_10)
980                         bmcr = 0;
981                 else if (speed == SPEED_100)
982                         bmcr = BMCR_SPEED100;
983                 else
984                         return -EINVAL;
985
986                 if (duplex == DUPLEX_FULL)
987                         bmcr |= BMCR_FULLDPLX;
988
989                 rtl_writephy(tp, 0x1f, 0x0000);
990         }
991
992         tp->phy_1000_ctrl_reg = giga_ctrl;
993
994         rtl_writephy(tp, MII_BMCR, bmcr);
995
996         if ((tp->mac_version == RTL_GIGA_MAC_VER_02) ||
997             (tp->mac_version == RTL_GIGA_MAC_VER_03)) {
998                 if ((speed == SPEED_100) && (autoneg != AUTONEG_ENABLE)) {
999                         rtl_writephy(tp, 0x17, 0x2138);
1000                         rtl_writephy(tp, 0x0e, 0x0260);
1001                 } else {
1002                         rtl_writephy(tp, 0x17, 0x2108);
1003                         rtl_writephy(tp, 0x0e, 0x0000);
1004                 }
1005         }
1006
1007         return 0;
1008 }
1009
1010 static int rtl8169_set_speed(struct net_device *dev,
1011                              u8 autoneg, u16 speed, u8 duplex)
1012 {
1013         struct rtl8169_private *tp = netdev_priv(dev);
1014         int ret;
1015
1016         ret = tp->set_speed(dev, autoneg, speed, duplex);
1017
1018         if (netif_running(dev) && (tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL))
1019                 mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);
1020
1021         return ret;
1022 }
1023
1024 static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1025 {
1026         struct rtl8169_private *tp = netdev_priv(dev);
1027         unsigned long flags;
1028         int ret;
1029
1030         spin_lock_irqsave(&tp->lock, flags);
1031         ret = rtl8169_set_speed(dev, cmd->autoneg, cmd->speed, cmd->duplex);
1032         spin_unlock_irqrestore(&tp->lock, flags);
1033
1034         return ret;
1035 }
1036
1037 static u32 rtl8169_get_rx_csum(struct net_device *dev)
1038 {
1039         struct rtl8169_private *tp = netdev_priv(dev);
1040
1041         return tp->cp_cmd & RxChkSum;
1042 }
1043
1044 static int rtl8169_set_rx_csum(struct net_device *dev, u32 data)
1045 {
1046         struct rtl8169_private *tp = netdev_priv(dev);
1047         void __iomem *ioaddr = tp->mmio_addr;
1048         unsigned long flags;
1049
1050         spin_lock_irqsave(&tp->lock, flags);
1051
1052         if (data)
1053                 tp->cp_cmd |= RxChkSum;
1054         else
1055                 tp->cp_cmd &= ~RxChkSum;
1056
1057         RTL_W16(CPlusCmd, tp->cp_cmd);
1058         RTL_R16(CPlusCmd);
1059
1060         spin_unlock_irqrestore(&tp->lock, flags);
1061
1062         return 0;
1063 }
1064
1065 #ifdef CONFIG_R8169_VLAN
1066
1067 static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
1068                                       struct sk_buff *skb)
1069 {
1070         return (vlan_tx_tag_present(skb)) ?
1071                 TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
1072 }
1073
1074 static void rtl8169_vlan_rx_register(struct net_device *dev,
1075                                      struct vlan_group *grp)
1076 {
1077         struct rtl8169_private *tp = netdev_priv(dev);
1078         void __iomem *ioaddr = tp->mmio_addr;
1079         unsigned long flags;
1080
1081         spin_lock_irqsave(&tp->lock, flags);
1082         tp->vlgrp = grp;
1083         /*
1084          * Do not disable RxVlan on 8110SCd.
1085          */
1086         if (tp->vlgrp || (tp->mac_version == RTL_GIGA_MAC_VER_05))
1087                 tp->cp_cmd |= RxVlan;
1088         else
1089                 tp->cp_cmd &= ~RxVlan;
1090         RTL_W16(CPlusCmd, tp->cp_cmd);
1091         RTL_R16(CPlusCmd);
1092         spin_unlock_irqrestore(&tp->lock, flags);
1093 }
1094
1095 static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
1096                                struct sk_buff *skb, int polling)
1097 {
1098         u32 opts2 = le32_to_cpu(desc->opts2);
1099         struct vlan_group *vlgrp = tp->vlgrp;
1100         int ret;
1101
1102         if (vlgrp && (opts2 & RxVlanTag)) {
1103                 u16 vtag = swab16(opts2 & 0xffff);
1104
1105                 if (likely(polling))
1106                         vlan_gro_receive(&tp->napi, vlgrp, vtag, skb);
1107                 else
1108                         __vlan_hwaccel_rx(skb, vlgrp, vtag, polling);
1109                 ret = 0;
1110         } else
1111                 ret = -1;
1112         desc->opts2 = 0;
1113         return ret;
1114 }
1115
1116 #else /* !CONFIG_R8169_VLAN */
1117
1118 static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
1119                                       struct sk_buff *skb)
1120 {
1121         return 0;
1122 }
1123
1124 static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
1125                                struct sk_buff *skb, int polling)
1126 {
1127         return -1;
1128 }
1129
1130 #endif
1131
1132 static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
1133 {
1134         struct rtl8169_private *tp = netdev_priv(dev);
1135         void __iomem *ioaddr = tp->mmio_addr;
1136         u32 status;
1137
1138         cmd->supported =
1139                 SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE;
1140         cmd->port = PORT_FIBRE;
1141         cmd->transceiver = XCVR_INTERNAL;
1142
1143         status = RTL_R32(TBICSR);
1144         cmd->advertising = (status & TBINwEnable) ?  ADVERTISED_Autoneg : 0;
1145         cmd->autoneg = !!(status & TBINwEnable);
1146
1147         cmd->speed = SPEED_1000;
1148         cmd->duplex = DUPLEX_FULL; /* Always set */
1149
1150         return 0;
1151 }
1152
1153 static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
1154 {
1155         struct rtl8169_private *tp = netdev_priv(dev);
1156
1157         return mii_ethtool_gset(&tp->mii, cmd);
1158 }
1159
1160 static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1161 {
1162         struct rtl8169_private *tp = netdev_priv(dev);
1163         unsigned long flags;
1164         int rc;
1165
1166         spin_lock_irqsave(&tp->lock, flags);
1167
1168         rc = tp->get_settings(dev, cmd);
1169
1170         spin_unlock_irqrestore(&tp->lock, flags);
1171         return rc;
1172 }
1173
1174 static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1175                              void *p)
1176 {
1177         struct rtl8169_private *tp = netdev_priv(dev);
1178         unsigned long flags;
1179
1180         if (regs->len > R8169_REGS_SIZE)
1181                 regs->len = R8169_REGS_SIZE;
1182
1183         spin_lock_irqsave(&tp->lock, flags);
1184         memcpy_fromio(p, tp->mmio_addr, regs->len);
1185         spin_unlock_irqrestore(&tp->lock, flags);
1186 }
1187
1188 static u32 rtl8169_get_msglevel(struct net_device *dev)
1189 {
1190         struct rtl8169_private *tp = netdev_priv(dev);
1191
1192         return tp->msg_enable;
1193 }
1194
1195 static void rtl8169_set_msglevel(struct net_device *dev, u32 value)
1196 {
1197         struct rtl8169_private *tp = netdev_priv(dev);
1198
1199         tp->msg_enable = value;
1200 }
1201
1202 static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = {
1203         "tx_packets",
1204         "rx_packets",
1205         "tx_errors",
1206         "rx_errors",
1207         "rx_missed",
1208         "align_errors",
1209         "tx_single_collisions",
1210         "tx_multi_collisions",
1211         "unicast",
1212         "broadcast",
1213         "multicast",
1214         "tx_aborted",
1215         "tx_underrun",
1216 };
1217
1218 static int rtl8169_get_sset_count(struct net_device *dev, int sset)
1219 {
1220         switch (sset) {
1221         case ETH_SS_STATS:
1222                 return ARRAY_SIZE(rtl8169_gstrings);
1223         default:
1224                 return -EOPNOTSUPP;
1225         }
1226 }
1227
1228 static void rtl8169_update_counters(struct net_device *dev)
1229 {
1230         struct rtl8169_private *tp = netdev_priv(dev);
1231         void __iomem *ioaddr = tp->mmio_addr;
1232         struct rtl8169_counters *counters;
1233         dma_addr_t paddr;
1234         u32 cmd;
1235         int wait = 1000;
1236         struct device *d = &tp->pci_dev->dev;
1237
1238         /*
1239          * Some chips are unable to dump tally counters when the receiver
1240          * is disabled.
1241          */
1242         if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0)
1243                 return;
1244
1245         counters = dma_alloc_coherent(d, sizeof(*counters), &paddr, GFP_KERNEL);
1246         if (!counters)
1247                 return;
1248
1249         RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
1250         cmd = (u64)paddr & DMA_BIT_MASK(32);
1251         RTL_W32(CounterAddrLow, cmd);
1252         RTL_W32(CounterAddrLow, cmd | CounterDump);
1253
1254         while (wait--) {
1255                 if ((RTL_R32(CounterAddrLow) & CounterDump) == 0) {
1256                         /* copy updated counters */
1257                         memcpy(&tp->counters, counters, sizeof(*counters));
1258                         break;
1259                 }
1260                 udelay(10);
1261         }
1262
1263         RTL_W32(CounterAddrLow, 0);
1264         RTL_W32(CounterAddrHigh, 0);
1265
1266         dma_free_coherent(d, sizeof(*counters), counters, paddr);
1267 }
1268
1269 static void rtl8169_get_ethtool_stats(struct net_device *dev,
1270                                       struct ethtool_stats *stats, u64 *data)
1271 {
1272         struct rtl8169_private *tp = netdev_priv(dev);
1273
1274         ASSERT_RTNL();
1275
1276         rtl8169_update_counters(dev);
1277
1278         data[0] = le64_to_cpu(tp->counters.tx_packets);
1279         data[1] = le64_to_cpu(tp->counters.rx_packets);
1280         data[2] = le64_to_cpu(tp->counters.tx_errors);
1281         data[3] = le32_to_cpu(tp->counters.rx_errors);
1282         data[4] = le16_to_cpu(tp->counters.rx_missed);
1283         data[5] = le16_to_cpu(tp->counters.align_errors);
1284         data[6] = le32_to_cpu(tp->counters.tx_one_collision);
1285         data[7] = le32_to_cpu(tp->counters.tx_multi_collision);
1286         data[8] = le64_to_cpu(tp->counters.rx_unicast);
1287         data[9] = le64_to_cpu(tp->counters.rx_broadcast);
1288         data[10] = le32_to_cpu(tp->counters.rx_multicast);
1289         data[11] = le16_to_cpu(tp->counters.tx_aborted);
1290         data[12] = le16_to_cpu(tp->counters.tx_underun);
1291 }
1292
1293 static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1294 {
1295         switch(stringset) {
1296         case ETH_SS_STATS:
1297                 memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings));
1298                 break;
1299         }
1300 }
1301
1302 static const struct ethtool_ops rtl8169_ethtool_ops = {
1303         .get_drvinfo            = rtl8169_get_drvinfo,
1304         .get_regs_len           = rtl8169_get_regs_len,
1305         .get_link               = ethtool_op_get_link,
1306         .get_settings           = rtl8169_get_settings,
1307         .set_settings           = rtl8169_set_settings,
1308         .get_msglevel           = rtl8169_get_msglevel,
1309         .set_msglevel           = rtl8169_set_msglevel,
1310         .get_rx_csum            = rtl8169_get_rx_csum,
1311         .set_rx_csum            = rtl8169_set_rx_csum,
1312         .set_tx_csum            = ethtool_op_set_tx_csum,
1313         .set_sg                 = ethtool_op_set_sg,
1314         .set_tso                = ethtool_op_set_tso,
1315         .get_regs               = rtl8169_get_regs,
1316         .get_wol                = rtl8169_get_wol,
1317         .set_wol                = rtl8169_set_wol,
1318         .get_strings            = rtl8169_get_strings,
1319         .get_sset_count         = rtl8169_get_sset_count,
1320         .get_ethtool_stats      = rtl8169_get_ethtool_stats,
1321 };
1322
1323 static void rtl8169_get_mac_version(struct rtl8169_private *tp,
1324                                     void __iomem *ioaddr)
1325 {
1326         /*
1327          * The driver currently handles the 8168Bf and the 8168Be identically
1328          * but they can be identified more specifically through the test below
1329          * if needed:
1330          *
1331          * (RTL_R32(TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be
1332          *
1333          * Same thing for the 8101Eb and the 8101Ec:
1334          *
1335          * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
1336          */
1337         static const struct {
1338                 u32 mask;
1339                 u32 val;
1340                 int mac_version;
1341         } mac_info[] = {
1342                 /* 8168D family. */
1343                 { 0x7cf00000, 0x28300000,       RTL_GIGA_MAC_VER_26 },
1344                 { 0x7cf00000, 0x28100000,       RTL_GIGA_MAC_VER_25 },
1345                 { 0x7c800000, 0x28800000,       RTL_GIGA_MAC_VER_27 },
1346                 { 0x7c800000, 0x28000000,       RTL_GIGA_MAC_VER_26 },
1347
1348                 /* 8168C family. */
1349                 { 0x7cf00000, 0x3cb00000,       RTL_GIGA_MAC_VER_24 },
1350                 { 0x7cf00000, 0x3c900000,       RTL_GIGA_MAC_VER_23 },
1351                 { 0x7cf00000, 0x3c800000,       RTL_GIGA_MAC_VER_18 },
1352                 { 0x7c800000, 0x3c800000,       RTL_GIGA_MAC_VER_24 },
1353                 { 0x7cf00000, 0x3c000000,       RTL_GIGA_MAC_VER_19 },
1354                 { 0x7cf00000, 0x3c200000,       RTL_GIGA_MAC_VER_20 },
1355                 { 0x7cf00000, 0x3c300000,       RTL_GIGA_MAC_VER_21 },
1356                 { 0x7cf00000, 0x3c400000,       RTL_GIGA_MAC_VER_22 },
1357                 { 0x7c800000, 0x3c000000,       RTL_GIGA_MAC_VER_22 },
1358
1359                 /* 8168B family. */
1360                 { 0x7cf00000, 0x38000000,       RTL_GIGA_MAC_VER_12 },
1361                 { 0x7cf00000, 0x38500000,       RTL_GIGA_MAC_VER_17 },
1362                 { 0x7c800000, 0x38000000,       RTL_GIGA_MAC_VER_17 },
1363                 { 0x7c800000, 0x30000000,       RTL_GIGA_MAC_VER_11 },
1364
1365                 /* 8101 family. */
1366                 { 0x7cf00000, 0x34a00000,       RTL_GIGA_MAC_VER_09 },
1367                 { 0x7cf00000, 0x24a00000,       RTL_GIGA_MAC_VER_09 },
1368                 { 0x7cf00000, 0x34900000,       RTL_GIGA_MAC_VER_08 },
1369                 { 0x7cf00000, 0x24900000,       RTL_GIGA_MAC_VER_08 },
1370                 { 0x7cf00000, 0x34800000,       RTL_GIGA_MAC_VER_07 },
1371                 { 0x7cf00000, 0x24800000,       RTL_GIGA_MAC_VER_07 },
1372                 { 0x7cf00000, 0x34000000,       RTL_GIGA_MAC_VER_13 },
1373                 { 0x7cf00000, 0x34300000,       RTL_GIGA_MAC_VER_10 },
1374                 { 0x7cf00000, 0x34200000,       RTL_GIGA_MAC_VER_16 },
1375                 { 0x7c800000, 0x34800000,       RTL_GIGA_MAC_VER_09 },
1376                 { 0x7c800000, 0x24800000,       RTL_GIGA_MAC_VER_09 },
1377                 { 0x7c800000, 0x34000000,       RTL_GIGA_MAC_VER_16 },
1378                 /* FIXME: where did these entries come from ? -- FR */
1379                 { 0xfc800000, 0x38800000,       RTL_GIGA_MAC_VER_15 },
1380                 { 0xfc800000, 0x30800000,       RTL_GIGA_MAC_VER_14 },
1381
1382                 /* 8110 family. */
1383                 { 0xfc800000, 0x98000000,       RTL_GIGA_MAC_VER_06 },
1384                 { 0xfc800000, 0x18000000,       RTL_GIGA_MAC_VER_05 },
1385                 { 0xfc800000, 0x10000000,       RTL_GIGA_MAC_VER_04 },
1386                 { 0xfc800000, 0x04000000,       RTL_GIGA_MAC_VER_03 },
1387                 { 0xfc800000, 0x00800000,       RTL_GIGA_MAC_VER_02 },
1388                 { 0xfc800000, 0x00000000,       RTL_GIGA_MAC_VER_01 },
1389
1390                 /* Catch-all */
1391                 { 0x00000000, 0x00000000,       RTL_GIGA_MAC_NONE   }
1392         }, *p = mac_info;
1393         u32 reg;
1394
1395         reg = RTL_R32(TxConfig);
1396         while ((reg & p->mask) != p->val)
1397                 p++;
1398         tp->mac_version = p->mac_version;
1399 }
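/*
 * Illustrative example: a chip reporting TxConfig = 0x3c100800 falls through
 * the exact-match 8168C entries and stops at { 0x7c800000, 0x3c000000 },
 * since 0x3c100800 & 0x7c800000 == 0x3c000000, so it is driven as
 * RTL_GIGA_MAC_VER_22.
 */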
1400
1401 static void rtl8169_print_mac_version(struct rtl8169_private *tp)
1402 {
1403         dprintk("mac_version = 0x%02x\n", tp->mac_version);
1404 }
1405
1406 struct phy_reg {
1407         u16 reg;
1408         u16 val;
1409 };
1410
1411 static void rtl_writephy_batch(struct rtl8169_private *tp,
1412                                const struct phy_reg *regs, int len)
1413 {
1414         while (len-- > 0) {
1415                 rtl_writephy(tp, regs->reg, regs->val);
1416                 regs++;
1417         }
1418 }
1419
1420 #define PHY_READ                0x00000000
1421 #define PHY_DATA_OR             0x10000000
1422 #define PHY_DATA_AND            0x20000000
1423 #define PHY_BJMPN               0x30000000
1424 #define PHY_READ_EFUSE          0x40000000
1425 #define PHY_READ_MAC_BYTE       0x50000000
1426 #define PHY_WRITE_MAC_BYTE      0x60000000
1427 #define PHY_CLEAR_READCOUNT     0x70000000
1428 #define PHY_WRITE               0x80000000
1429 #define PHY_READCOUNT_EQ_SKIP   0x90000000
1430 #define PHY_COMP_EQ_SKIPN       0xa0000000
1431 #define PHY_COMP_NEQ_SKIPN      0xb0000000
1432 #define PHY_WRITE_PREVIOUS      0xc0000000
1433 #define PHY_SKIPN               0xd0000000
1434 #define PHY_DELAY_MS            0xe0000000
1435 #define PHY_WRITE_ERI_WORD      0xf0000000
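/*
 * Firmware action words are 32 bit little-endian values: the top nibble
 * selects one of the opcodes above, the low 28 bits are its operands.  For
 * PHY_WRITE, the only opcode rtl_phy_write_fw() accepts below, the layout is
 *
 *	bits 31:28   0x8 (PHY_WRITE)
 *	bits 27:16   PHY register address
 *	bits 15:0    value to write
 *
 * so a hypothetical action word 0x801f0001 means "write 0x0001 to register
 * 0x1f".
 */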
1436
1437 static void
1438 rtl_phy_write_fw(struct rtl8169_private *tp, const struct firmware *fw)
1439 {
1440         __le32 *phytable = (__le32 *)fw->data;
1441         struct net_device *dev = tp->dev;
1442         size_t i;
1443
1444         if (fw->size % sizeof(*phytable)) {
1445                 netif_err(tp, probe, dev, "odd sized firmware %zd\n", fw->size);
1446                 return;
1447         }
1448
1449         for (i = 0; i < fw->size / sizeof(*phytable); i++) {
1450                 u32 action = le32_to_cpu(phytable[i]);
1451
1452                 if (!action)
1453                         break;
1454
1455                 if ((action & 0xf0000000) != PHY_WRITE) {
1456                         netif_err(tp, probe, dev,
1457                                   "unknown action 0x%08x\n", action);
1458                         return;
1459                 }
1460         }
1461
1462         while (i-- != 0) {
1463                 u32 action = le32_to_cpu(*phytable);
1464                 u32 data = action & 0x0000ffff;
1465                 u32 reg = (action & 0x0fff0000) >> 16;
1466
1467                 switch(action & 0xf0000000) {
1468                 case PHY_WRITE:
1469                         rtl_writephy(tp, reg, data);
1470                         phytable++;
1471                         break;
1472                 default:
1473                         BUG();
1474                 }
1475         }
1476 }
1477
1478 static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
1479 {
1480         static const struct phy_reg phy_reg_init[] = {
1481                 { 0x1f, 0x0001 },
1482                 { 0x06, 0x006e },
1483                 { 0x08, 0x0708 },
1484                 { 0x15, 0x4000 },
1485                 { 0x18, 0x65c7 },
1486
1487                 { 0x1f, 0x0001 },
1488                 { 0x03, 0x00a1 },
1489                 { 0x02, 0x0008 },
1490                 { 0x01, 0x0120 },
1491                 { 0x00, 0x1000 },
1492                 { 0x04, 0x0800 },
1493                 { 0x04, 0x0000 },
1494
1495                 { 0x03, 0xff41 },
1496                 { 0x02, 0xdf60 },
1497                 { 0x01, 0x0140 },
1498                 { 0x00, 0x0077 },
1499                 { 0x04, 0x7800 },
1500                 { 0x04, 0x7000 },
1501
1502                 { 0x03, 0x802f },
1503                 { 0x02, 0x4f02 },
1504                 { 0x01, 0x0409 },
1505                 { 0x00, 0xf0f9 },
1506                 { 0x04, 0x9800 },
1507                 { 0x04, 0x9000 },
1508
1509                 { 0x03, 0xdf01 },
1510                 { 0x02, 0xdf20 },
1511                 { 0x01, 0xff95 },
1512                 { 0x00, 0xba00 },
1513                 { 0x04, 0xa800 },
1514                 { 0x04, 0xa000 },
1515
1516                 { 0x03, 0xff41 },
1517                 { 0x02, 0xdf20 },
1518                 { 0x01, 0x0140 },
1519                 { 0x00, 0x00bb },
1520                 { 0x04, 0xb800 },
1521                 { 0x04, 0xb000 },
1522
1523                 { 0x03, 0xdf41 },
1524                 { 0x02, 0xdc60 },
1525                 { 0x01, 0x6340 },
1526                 { 0x00, 0x007d },
1527                 { 0x04, 0xd800 },
1528                 { 0x04, 0xd000 },
1529
1530                 { 0x03, 0xdf01 },
1531                 { 0x02, 0xdf20 },
1532                 { 0x01, 0x100a },
1533                 { 0x00, 0xa0ff },
1534                 { 0x04, 0xf800 },
1535                 { 0x04, 0xf000 },
1536
1537                 { 0x1f, 0x0000 },
1538                 { 0x0b, 0x0000 },
1539                 { 0x00, 0x9200 }
1540         };
1541
1542         rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
1543 }
1544
1545 static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp)
1546 {
1547         static const struct phy_reg phy_reg_init[] = {
1548                 { 0x1f, 0x0002 },
1549                 { 0x01, 0x90d0 },
1550                 { 0x1f, 0x0000 }
1551         };
1552
1553         rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
1554 }
1555
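/*
 * Board-specific quirk: only boards whose PCI subsystem IDs match
 * PCI_VENDOR_ID_GIGABYTE / 0xe000 get the extra PHY writes below.
 */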
1556 static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp)
1557 {
1558         struct pci_dev *pdev = tp->pci_dev;
1559         u16 vendor_id, device_id;
1560
1561         pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &vendor_id);
1562         pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &device_id);
1563
1564         if ((vendor_id != PCI_VENDOR_ID_GIGABYTE) || (device_id != 0xe000))
1565                 return;
1566
1567         rtl_writephy(tp, 0x1f, 0x0001);
1568         rtl_writephy(tp, 0x10, 0xf01b);
1569         rtl_writephy(tp, 0x1f, 0x0000);
1570 }
1571
1572 static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp)
1573 {
1574         static const struct phy_reg phy_reg_init[] = {
1575                 { 0x1f, 0x0001 },
1576                 { 0x04, 0x0000 },
1577                 { 0x03, 0x00a1 },
1578                 { 0x02, 0x0008 },
1579                 { 0x01, 0x0120 },
1580                 { 0x00, 0x1000 },
1581                 { 0x04, 0x0800 },
1582                 { 0x04, 0x9000 },
1583                 { 0x03, 0x802f },
1584                 { 0x02, 0x4f02 },
1585                 { 0x01, 0x0409 },
1586                 { 0x00, 0xf099 },
1587                 { 0x04, 0x9800 },
1588                 { 0x04, 0xa000 },
1589                 { 0x03, 0xdf01 },
1590                 { 0x02, 0xdf20 },
1591                 { 0x01, 0xff95 },
1592                 { 0x00, 0xba00 },
1593                 { 0x04, 0xa800 },
1594                 { 0x04, 0xf000 },
1595                 { 0x03, 0xdf01 },
1596                 { 0x02, 0xdf20 },
1597                 { 0x01, 0x101a },
1598                 { 0x00, 0xa0ff },
1599                 { 0x04, 0xf800 },
1600                 { 0x04, 0x0000 },
1601                 { 0x1f, 0x0000 },
1602
1603                 { 0x1f, 0x0001 },
1604                 { 0x10, 0xf41b },
1605                 { 0x14, 0xfb54 },
1606                 { 0x18, 0xf5c7 },
1607                 { 0x1f, 0x0000 },
1608
1609                 { 0x1f, 0x0001 },
1610                 { 0x17, 0x0cc0 },
1611                 { 0x1f, 0x0000 }
1612         };
1613
1614         rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
1615
1616         rtl8169scd_hw_phy_config_quirk(tp);
1617 }
1618
1619 static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp)
1620 {
1621         static const struct phy_reg phy_reg_init[] = {
1622                 { 0x1f, 0x0001 },
1623                 { 0x04, 0x0000 },
1624                 { 0x03, 0x00a1 },
1625                 { 0x02, 0x0008 },
1626                 { 0x01, 0x0120 },
1627                 { 0x00, 0x1000 },
1628                 { 0x04, 0x0800 },
1629                 { 0x04, 0x9000 },
1630                 { 0x03, 0x802f },
1631                 { 0x02, 0x4f02 },
1632                 { 0x01, 0x0409 },
1633                 { 0x00, 0xf099 },
1634                 { 0x04, 0x9800 },
1635                 { 0x04, 0xa000 },
1636                 { 0x03, 0xdf01 },
1637                 { 0x02, 0xdf20 },
1638                 { 0x01, 0xff95 },
1639                 { 0x00, 0xba00 },
1640                 { 0x04, 0xa800 },
1641                 { 0x04, 0xf000 },
1642                 { 0x03, 0xdf01 },
1643                 { 0x02, 0xdf20 },
1644                 { 0x01, 0x101a },
1645                 { 0x00, 0xa0ff },
1646                 { 0x04, 0xf800 },
1647                 { 0x04, 0x0000 },
1648                 { 0x1f, 0x0000 },
1649
1650                 { 0x1f, 0x0001 },
1651                 { 0x0b, 0x8480 },
1652                 { 0x1f, 0x0000 },
1653
1654                 { 0x1f, 0x0001 },
1655                 { 0x18, 0x67c7 },
1656                 { 0x04, 0x2000 },
1657                 { 0x03, 0x002f },
1658                 { 0x02, 0x4360 },
1659                 { 0x01, 0x0109 },
1660                 { 0x00, 0x3022 },
1661                 { 0x04, 0x2800 },
1662                 { 0x1f, 0x0000 },
1663
1664                 { 0x1f, 0x0001 },
1665                 { 0x17, 0x0cc0 },
1666                 { 0x1f, 0x0000 }
1667         };
1668
1669         rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
1670 }
1671
1672 static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp)
1673 {
1674         static const struct phy_reg phy_reg_init[] = {
1675                 { 0x10, 0xf41b },
1676                 { 0x1f, 0x0000 }
1677         };
1678
1679         rtl_writephy(tp, 0x1f, 0x0001);
1680         rtl_patchphy(tp, 0x16, 1 << 0);
1681
1682         rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
1683 }
1684
1685 static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp)
1686 {
1687         static const struct phy_reg phy_reg_init[] = {
1688                 { 0x1f, 0x0001 },
1689                 { 0x10, 0xf41b },
1690                 { 0x1f, 0x0000 }
1691         };
1692
1693         rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
1694 }
1695
1696 static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp)
1697 {
1698         static const struct phy_reg phy_reg_init[] = {
1699                 { 0x1f, 0x0000 },
1700                 { 0x1d, 0x0f00 },
1701                 { 0x1f, 0x0002 },
1702                 { 0x0c, 0x1ec8 },
1703                 { 0x1f, 0x0000 }
1704         };
1705
1706         rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
1707 }
1708
1709 static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp)
1710 {
1711         static const struct phy_reg phy_reg_init[] = {
1712                 { 0x1f, 0x0001 },
1713                 { 0x1d, 0x3d98 },
1714                 { 0x1f, 0x0000 }
1715         };
1716
1717         rtl_writephy(tp, 0x1f, 0x0000);
1718         rtl_patchphy(tp, 0x14, 1 << 5);
1719         rtl_patchphy(tp, 0x0d, 1 << 5);
1720
1721         rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
1722 }
1723
1724 static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp)
1725 {
1726         static const struct phy_reg phy_reg_init[] = {
1727                 { 0x1f, 0x0001 },
1728                 { 0x12, 0x2300 },
1729                 { 0x1f, 0x0002 },
1730                 { 0x00, 0x88d4 },
1731                 { 0x01, 0x82b1 },
1732                 { 0x03, 0x7002 },
1733                 { 0x08, 0x9e30 },
1734                 { 0x09, 0x01f0 },
1735                 { 0x0a, 0x5500 },
1736                 { 0x0c, 0x00c8 },
1737                 { 0x1f, 0x0003 },
1738                 { 0x12, 0xc096 },
1739                 { 0x16, 0x000a },
1740                 { 0x1f, 0x0000 },
1741                 { 0x1f, 0x0000 },
1742                 { 0x09, 0x2000 },
1743                 { 0x09, 0x0000 }
1744         };
1745
1746         rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
1747
1748         rtl_patchphy(tp, 0x14, 1 << 5);
1749         rtl_patchphy(tp, 0x0d, 1 << 5);
1750         rtl_writephy(tp, 0x1f, 0x0000);
1751 }
1752
1753 static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp)
1754 {
1755         static const struct phy_reg phy_reg_init[] = {
1756                 { 0x1f, 0x0001 },
1757                 { 0x12, 0x2300 },
1758                 { 0x03, 0x802f },
1759                 { 0x02, 0x4f02 },
1760                 { 0x01, 0x0409 },
1761                 { 0x00, 0xf099 },
1762                 { 0x04, 0x9800 },
1763                 { 0x04, 0x9000 },
1764                 { 0x1d, 0x3d98 },
1765                 { 0x1f, 0x0002 },
1766                 { 0x0c, 0x7eb8 },
1767                 { 0x06, 0x0761 },
1768                 { 0x1f, 0x0003 },
1769                 { 0x16, 0x0f0a },
1770                 { 0x1f, 0x0000 }
1771         };
1772
1773         rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
1774
1775         rtl_patchphy(tp, 0x16, 1 << 0);
1776         rtl_patchphy(tp, 0x14, 1 << 5);
1777         rtl_patchphy(tp, 0x0d, 1 << 5);
1778         rtl_writephy(tp, 0x1f, 0x0000);
1779 }
1780
1781 static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp)
1782 {
1783         static const struct phy_reg phy_reg_init[] = {
1784                 { 0x1f, 0x0001 },
1785                 { 0x12, 0x2300 },
1786                 { 0x1d, 0x3d98 },
1787                 { 0x1f, 0x0002 },
1788                 { 0x0c, 0x7eb8 },
1789                 { 0x06, 0x5461 },
1790                 { 0x1f, 0x0003 },
1791                 { 0x16, 0x0f0a },
1792                 { 0x1f, 0x0000 }
1793         };
1794
1795         rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
1796
1797         rtl_patchphy(tp, 0x16, 1 << 0);
1798         rtl_patchphy(tp, 0x14, 1 << 5);
1799         rtl_patchphy(tp, 0x0d, 1 << 5);
1800         rtl_writephy(tp, 0x1f, 0x0000);
1801 }
1802
1803 static void rtl8168c_4_hw_phy_config(struct rtl8169_private *tp)
1804 {
1805         rtl8168c_3_hw_phy_config(tp);
1806 }
1807
1808 static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
1809 {
1810         static const struct phy_reg phy_reg_init_0[] = {
1811                 /* Channel Estimation */
1812                 { 0x1f, 0x0001 },
1813                 { 0x06, 0x4064 },
1814                 { 0x07, 0x2863 },
1815                 { 0x08, 0x059c },
1816                 { 0x09, 0x26b4 },
1817                 { 0x0a, 0x6a19 },
1818                 { 0x0b, 0xdcc8 },
1819                 { 0x10, 0xf06d },
1820                 { 0x14, 0x7f68 },
1821                 { 0x18, 0x7fd9 },
1822                 { 0x1c, 0xf0ff },
1823                 { 0x1d, 0x3d9c },
1824                 { 0x1f, 0x0003 },
1825                 { 0x12, 0xf49f },
1826                 { 0x13, 0x070b },
1827                 { 0x1a, 0x05ad },
1828                 { 0x14, 0x94c0 },
1829
1830                 /*
1831                  * Tx Error Issue
1832                  * enhance line driver power
1833                  */
1834                 { 0x1f, 0x0002 },
1835                 { 0x06, 0x5561 },
1836                 { 0x1f, 0x0005 },
1837                 { 0x05, 0x8332 },
1838                 { 0x06, 0x5561 },
1839
1840                 /*
1841                  * Cannot link at 1Gbps with a bad cable
1842                  * Decrease SNR threshold from 21.07dB to 19.04dB
1843                  */
1844                 { 0x1f, 0x0001 },
1845                 { 0x17, 0x0cc0 },
1846
1847                 { 0x1f, 0x0000 },
1848                 { 0x0d, 0xf880 }
1849         };
1850         void __iomem *ioaddr = tp->mmio_addr;
1851         const struct firmware *fw;
1852
1853         rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
1854
1855         /*
1856          * Rx Error Issue
1857          * Fine Tune Switching regulator parameter
1858          */
1859         rtl_writephy(tp, 0x1f, 0x0002);
1860         rtl_w1w0_phy(tp, 0x0b, 0x0010, 0x00ef);
1861         rtl_w1w0_phy(tp, 0x0c, 0xa200, 0x5d00);
1862
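        /*
         * The PHY parameter set below is chosen according to the value
         * read back from efuse location 0x01.
         */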
1863         if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) {
1864                 static const struct phy_reg phy_reg_init[] = {
1865                         { 0x1f, 0x0002 },
1866                         { 0x05, 0x669a },
1867                         { 0x1f, 0x0005 },
1868                         { 0x05, 0x8330 },
1869                         { 0x06, 0x669a },
1870                         { 0x1f, 0x0002 }
1871                 };
1872                 int val;
1873
1874                 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
1875
1876                 val = rtl_readphy(tp, 0x0d);
1877
1878                 if ((val & 0x00ff) != 0x006c) {
1879                         static const u32 set[] = {
1880                                 0x0065, 0x0066, 0x0067, 0x0068,
1881                                 0x0069, 0x006a, 0x006b, 0x006c
1882                         };
1883                         int i;
1884
1885                         rtl_writephy(tp, 0x1f, 0x0002);
1886
1887                         val &= 0xff00;
1888                         for (i = 0; i < ARRAY_SIZE(set); i++)
1889                                 rtl_writephy(tp, 0x0d, val | set[i]);
1890                 }
1891         } else {
1892                 static const struct phy_reg phy_reg_init[] = {
1893                         { 0x1f, 0x0002 },
1894                         { 0x05, 0x6662 },
1895                         { 0x1f, 0x0005 },
1896                         { 0x05, 0x8330 },
1897                         { 0x06, 0x6662 }
1898                 };
1899
1900                 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
1901         }
1902
1903         /* RSET couple improve */
1904         rtl_writephy(tp, 0x1f, 0x0002);
1905         rtl_patchphy(tp, 0x0d, 0x0300);
1906         rtl_patchphy(tp, 0x0f, 0x0010);
1907
1908         /* Fine tune PLL performance */
1909         rtl_writephy(tp, 0x1f, 0x0002);
1910         rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
1911         rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);
1912
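        /*
         * Apply the external firmware patch only when the PHY reads back
         * the expected 0xbf00 signature (page 5, reg 0x05 = 0x001b, reg 0x06).
         */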
1913         rtl_writephy(tp, 0x1f, 0x0005);
1914         rtl_writephy(tp, 0x05, 0x001b);
1915         if (rtl_readphy(tp, 0x06) == 0xbf00 &&
1916             request_firmware(&fw, FIRMWARE_8168D_1, &tp->pci_dev->dev) == 0) {
1917                 rtl_phy_write_fw(tp, fw);
1918                 release_firmware(fw);
1919         } else {
1920                 netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n");
1921         }
1922
1923         rtl_writephy(tp, 0x1f, 0x0000);
1924 }
1925
1926 static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
1927 {
1928         static const struct phy_reg phy_reg_init_0[] = {
1929                 /* Channel Estimation */
1930                 { 0x1f, 0x0001 },
1931                 { 0x06, 0x4064 },
1932                 { 0x07, 0x2863 },
1933                 { 0x08, 0x059c },
1934                 { 0x09, 0x26b4 },
1935                 { 0x0a, 0x6a19 },
1936                 { 0x0b, 0xdcc8 },
1937                 { 0x10, 0xf06d },
1938                 { 0x14, 0x7f68 },
1939                 { 0x18, 0x7fd9 },
1940                 { 0x1c, 0xf0ff },
1941                 { 0x1d, 0x3d9c },
1942                 { 0x1f, 0x0003 },
1943                 { 0x12, 0xf49f },
1944                 { 0x13, 0x070b },
1945                 { 0x1a, 0x05ad },
1946                 { 0x14, 0x94c0 },
1947
1948                 /*
1949                  * Tx Error Issue
1950                  * enhance line driver power
1951                  */
1952                 { 0x1f, 0x0002 },
1953                 { 0x06, 0x5561 },
1954                 { 0x1f, 0x0005 },
1955                 { 0x05, 0x8332 },
1956                 { 0x06, 0x5561 },
1957
1958                 /*
1959                  * Cannot link at 1Gbps with a bad cable
1960                  * Decrease SNR threshold from 21.07dB to 19.04dB
1961                  */
1962                 { 0x1f, 0x0001 },
1963                 { 0x17, 0x0cc0 },
1964
1965                 { 0x1f, 0x0000 },
1966                 { 0x0d, 0xf880 }
1967         };
1968         void __iomem *ioaddr = tp->mmio_addr;
1969         const struct firmware *fw;
1970
1971         rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
1972
1973         if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) {
1974                 static const struct phy_reg phy_reg_init[] = {
1975                         { 0x1f, 0x0002 },
1976                         { 0x05, 0x669a },
1977                         { 0x1f, 0x0005 },
1978                         { 0x05, 0x8330 },
1979                         { 0x06, 0x669a },
1980
1981                         { 0x1f, 0x0002 }
1982                 };
1983                 int val;
1984
1985                 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
1986
1987                 val = rtl_readphy(tp, 0x0d);
1988                 if ((val & 0x00ff) != 0x006c) {
1989                         static const u32 set[] = {
1990                                 0x0065, 0x0066, 0x0067, 0x0068,
1991                                 0x0069, 0x006a, 0x006b, 0x006c
1992                         };
1993                         int i;
1994
1995                         rtl_writephy(tp, 0x1f, 0x0002);
1996
1997                         val &= 0xff00;
1998                         for (i = 0; i < ARRAY_SIZE(set); i++)
1999                                 rtl_writephy(tp, 0x0d, val | set[i]);
2000                 }
2001         } else {
2002                 static const struct phy_reg phy_reg_init[] = {
2003                         { 0x1f, 0x0002 },
2004                         { 0x05, 0x2642 },
2005                         { 0x1f, 0x0005 },
2006                         { 0x05, 0x8330 },
2007                         { 0x06, 0x2642 }
2008                 };
2009
2010                 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2011         }
2012
2013         /* Fine tune PLL performance */
2014         rtl_writephy(tp, 0x1f, 0x0002);
2015         rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
2016         rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);
2017
2018         /* Switching regulator Slew rate */
2019         rtl_writephy(tp, 0x1f, 0x0002);
2020         rtl_patchphy(tp, 0x0f, 0x0017);
2021
2022         rtl_writephy(tp, 0x1f, 0x0005);
2023         rtl_writephy(tp, 0x05, 0x001b);
2024         if (rtl_readphy(tp, 0x06) == 0xb300 &&
2025             request_firmware(&fw, FIRMWARE_8168D_2, &tp->pci_dev->dev) == 0) {
2026                 rtl_phy_write_fw(tp, fw);
2027                 release_firmware(fw);
2028         } else {
2029                 netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n");
2030         }
2031
2032         rtl_writephy(tp, 0x1f, 0x0000);
2033 }
2034
2035 static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp)
2036 {
2037         static const struct phy_reg phy_reg_init[] = {
2038                 { 0x1f, 0x0002 },
2039                 { 0x10, 0x0008 },
2040                 { 0x0d, 0x006c },
2041
2042                 { 0x1f, 0x0000 },
2043                 { 0x0d, 0xf880 },
2044
2045                 { 0x1f, 0x0001 },
2046                 { 0x17, 0x0cc0 },
2047
2048                 { 0x1f, 0x0001 },
2049                 { 0x0b, 0xa4d8 },
2050                 { 0x09, 0x281c },
2051                 { 0x07, 0x2883 },
2052                 { 0x0a, 0x6b35 },
2053                 { 0x1d, 0x3da4 },
2054                 { 0x1c, 0xeffd },
2055                 { 0x14, 0x7f52 },
2056                 { 0x18, 0x7fc6 },
2057                 { 0x08, 0x0601 },
2058                 { 0x06, 0x4063 },
2059                 { 0x10, 0xf074 },
2060                 { 0x1f, 0x0003 },
2061                 { 0x13, 0x0789 },
2062                 { 0x12, 0xf4bd },
2063                 { 0x1a, 0x04fd },
2064                 { 0x14, 0x84b0 },
2065                 { 0x1f, 0x0000 },
2066                 { 0x00, 0x9200 },
2067
2068                 { 0x1f, 0x0005 },
2069                 { 0x01, 0x0340 },
2070                 { 0x1f, 0x0001 },
2071                 { 0x04, 0x4000 },
2072                 { 0x03, 0x1d21 },
2073                 { 0x02, 0x0c32 },
2074                 { 0x01, 0x0200 },
2075                 { 0x00, 0x5554 },
2076                 { 0x04, 0x4800 },
2077                 { 0x04, 0x4000 },
2078                 { 0x04, 0xf000 },
2079                 { 0x03, 0xdf01 },
2080                 { 0x02, 0xdf20 },
2081                 { 0x01, 0x101a },
2082                 { 0x00, 0xa0ff },
2083                 { 0x04, 0xf800 },
2084                 { 0x04, 0xf000 },
2085                 { 0x1f, 0x0000 },
2086
2087                 { 0x1f, 0x0007 },
2088                 { 0x1e, 0x0023 },
2089                 { 0x16, 0x0000 },
2090                 { 0x1f, 0x0000 }
2091         };
2092
2093         rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2094 }
2095
2096 static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
2097 {
2098         static const struct phy_reg phy_reg_init[] = {
2099                 { 0x1f, 0x0003 },
2100                 { 0x08, 0x441d },
2101                 { 0x01, 0x9100 },
2102                 { 0x1f, 0x0000 }
2103         };
2104
2105         rtl_writephy(tp, 0x1f, 0x0000);
2106         rtl_patchphy(tp, 0x11, 1 << 12);
2107         rtl_patchphy(tp, 0x19, 1 << 13);
2108         rtl_patchphy(tp, 0x10, 1 << 15);
2109
2110         rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2111 }
2112
2113 static void rtl_hw_phy_config(struct net_device *dev)
2114 {
2115         struct rtl8169_private *tp = netdev_priv(dev);
2116
2117         rtl8169_print_mac_version(tp);
2118
2119         switch (tp->mac_version) {
2120         case RTL_GIGA_MAC_VER_01:
2121                 break;
2122         case RTL_GIGA_MAC_VER_02:
2123         case RTL_GIGA_MAC_VER_03:
2124                 rtl8169s_hw_phy_config(tp);
2125                 break;
2126         case RTL_GIGA_MAC_VER_04:
2127                 rtl8169sb_hw_phy_config(tp);
2128                 break;
2129         case RTL_GIGA_MAC_VER_05:
2130                 rtl8169scd_hw_phy_config(tp);
2131                 break;
2132         case RTL_GIGA_MAC_VER_06:
2133                 rtl8169sce_hw_phy_config(tp);
2134                 break;
2135         case RTL_GIGA_MAC_VER_07:
2136         case RTL_GIGA_MAC_VER_08:
2137         case RTL_GIGA_MAC_VER_09:
2138                 rtl8102e_hw_phy_config(tp);
2139                 break;
2140         case RTL_GIGA_MAC_VER_11:
2141                 rtl8168bb_hw_phy_config(tp);
2142                 break;
2143         case RTL_GIGA_MAC_VER_12:
2144                 rtl8168bef_hw_phy_config(tp);
2145                 break;
2146         case RTL_GIGA_MAC_VER_17:
2147                 rtl8168bef_hw_phy_config(tp);
2148                 break;
2149         case RTL_GIGA_MAC_VER_18:
2150                 rtl8168cp_1_hw_phy_config(tp);
2151                 break;
2152         case RTL_GIGA_MAC_VER_19:
2153                 rtl8168c_1_hw_phy_config(tp);
2154                 break;
2155         case RTL_GIGA_MAC_VER_20:
2156                 rtl8168c_2_hw_phy_config(tp);
2157                 break;
2158         case RTL_GIGA_MAC_VER_21:
2159                 rtl8168c_3_hw_phy_config(tp);
2160                 break;
2161         case RTL_GIGA_MAC_VER_22:
2162                 rtl8168c_4_hw_phy_config(tp);
2163                 break;
2164         case RTL_GIGA_MAC_VER_23:
2165         case RTL_GIGA_MAC_VER_24:
2166                 rtl8168cp_2_hw_phy_config(tp);
2167                 break;
2168         case RTL_GIGA_MAC_VER_25:
2169                 rtl8168d_1_hw_phy_config(tp);
2170                 break;
2171         case RTL_GIGA_MAC_VER_26:
2172                 rtl8168d_2_hw_phy_config(tp);
2173                 break;
2174         case RTL_GIGA_MAC_VER_27:
2175                 rtl8168d_3_hw_phy_config(tp);
2176                 break;
2177
2178         default:
2179                 break;
2180         }
2181 }
2182
2183 static void rtl8169_phy_timer(unsigned long __opaque)
2184 {
2185         struct net_device *dev = (struct net_device *)__opaque;
2186         struct rtl8169_private *tp = netdev_priv(dev);
2187         struct timer_list *timer = &tp->timer;
2188         void __iomem *ioaddr = tp->mmio_addr;
2189         unsigned long timeout = RTL8169_PHY_TIMEOUT;
2190
2191         assert(tp->mac_version > RTL_GIGA_MAC_VER_01);
2192
2193         if (!(tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL))
2194                 return;
2195
2196         spin_lock_irq(&tp->lock);
2197
2198         if (tp->phy_reset_pending(tp)) {
2199                 /*
2200                  * A busy loop could burn quite a few cycles on a modern CPU.
2201                  * Let's delay the execution of the timer for a few ticks.
2202                  */
2203                 timeout = HZ/10;
2204                 goto out_mod_timer;
2205         }
2206
2207         if (tp->link_ok(ioaddr))
2208                 goto out_unlock;
2209
2210         netif_warn(tp, link, dev, "PHY reset until link up\n");
2211
2212         tp->phy_reset_enable(tp);
2213
2214 out_mod_timer:
2215         mod_timer(timer, jiffies + timeout);
2216 out_unlock:
2217         spin_unlock_irq(&tp->lock);
2218 }
2219
2220 static inline void rtl8169_delete_timer(struct net_device *dev)
2221 {
2222         struct rtl8169_private *tp = netdev_priv(dev);
2223         struct timer_list *timer = &tp->timer;
2224
2225         if (tp->mac_version <= RTL_GIGA_MAC_VER_01)
2226                 return;
2227
2228         del_timer_sync(timer);
2229 }
2230
2231 static inline void rtl8169_request_timer(struct net_device *dev)
2232 {
2233         struct rtl8169_private *tp = netdev_priv(dev);
2234         struct timer_list *timer = &tp->timer;
2235
2236         if (tp->mac_version <= RTL_GIGA_MAC_VER_01)
2237                 return;
2238
2239         mod_timer(timer, jiffies + RTL8169_PHY_TIMEOUT);
2240 }
2241
2242 #ifdef CONFIG_NET_POLL_CONTROLLER
2243 /*
2244  * Polling 'interrupt' - used by things like netconsole to send skbs
2245  * without having to re-enable interrupts. It's not called while
2246  * the interrupt routine is executing.
2247  */
2248 static void rtl8169_netpoll(struct net_device *dev)
2249 {
2250         struct rtl8169_private *tp = netdev_priv(dev);
2251         struct pci_dev *pdev = tp->pci_dev;
2252
2253         disable_irq(pdev->irq);
2254         rtl8169_interrupt(pdev->irq, dev);
2255         enable_irq(pdev->irq);
2256 }
2257 #endif
2258
2259 static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
2260                                   void __iomem *ioaddr)
2261 {
2262         iounmap(ioaddr);
2263         pci_release_regions(pdev);
2264         pci_clear_mwi(pdev);
2265         pci_disable_device(pdev);
2266         free_netdev(dev);
2267 }
2268
2269 static void rtl8169_phy_reset(struct net_device *dev,
2270                               struct rtl8169_private *tp)
2271 {
2272         unsigned int i;
2273
2274         tp->phy_reset_enable(tp);
2275         for (i = 0; i < 100; i++) {
2276                 if (!tp->phy_reset_pending(tp))
2277                         return;
2278                 msleep(1);
2279         }
2280         netif_err(tp, link, dev, "PHY reset failed\n");
2281 }
2282
2283 static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
2284 {
2285         void __iomem *ioaddr = tp->mmio_addr;
2286
2287         rtl_hw_phy_config(dev);
2288
2289         if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
2290                 dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
2291                 RTL_W8(0x82, 0x01);
2292         }
2293
2294         pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);
2295
2296         if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
2297                 pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);
2298
2299         if (tp->mac_version == RTL_GIGA_MAC_VER_02) {
2300                 dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
2301                 RTL_W8(0x82, 0x01);
2302                 dprintk("Set PHY Reg 0x0bh = 0x00h\n");
2303                 rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0
2304         }
2305
2306         rtl8169_phy_reset(dev, tp);
2307
2308         /*
2309          * rtl8169_set_speed_xmii correctly handles the Fast-Ethernet-only
2310          * 8101. Don't panic.
2311          */
2312         rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL);
2313
2314         if (RTL_R8(PHYstatus) & TBI_Enable)
2315                 netif_info(tp, link, dev, "TBI auto-negotiating\n");
2316 }
2317
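/*
 * Write the MAC address into the MAC0/MAC4 registers. The Cfg9346 lock is
 * released around the writes; the read-backs flush the posted MMIO writes.
 */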
2318 static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
2319 {
2320         void __iomem *ioaddr = tp->mmio_addr;
2321         u32 high;
2322         u32 low;
2323
2324         low  = addr[0] | (addr[1] << 8) | (addr[2] << 16) | (addr[3] << 24);
2325         high = addr[4] | (addr[5] << 8);
2326
2327         spin_lock_irq(&tp->lock);
2328
2329         RTL_W8(Cfg9346, Cfg9346_Unlock);
2330
2331         RTL_W32(MAC4, high);
2332         RTL_R32(MAC4);
2333
2334         RTL_W32(MAC0, low);
2335         RTL_R32(MAC0);
2336
2337         RTL_W8(Cfg9346, Cfg9346_Lock);
2338
2339         spin_unlock_irq(&tp->lock);
2340 }
2341
2342 static int rtl_set_mac_address(struct net_device *dev, void *p)
2343 {
2344         struct rtl8169_private *tp = netdev_priv(dev);
2345         struct sockaddr *addr = p;
2346
2347         if (!is_valid_ether_addr(addr->sa_data))
2348                 return -EADDRNOTAVAIL;
2349
2350         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2351
2352         rtl_rar_set(tp, dev->dev_addr);
2353
2354         return 0;
2355 }
2356
2357 static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2358 {
2359         struct rtl8169_private *tp = netdev_priv(dev);
2360         struct mii_ioctl_data *data = if_mii(ifr);
2361
2362         return netif_running(dev) ? tp->do_ioctl(tp, data, cmd) : -ENODEV;
2363 }
2364
2365 static int rtl_xmii_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd)
2366 {
2367         switch (cmd) {
2368         case SIOCGMIIPHY:
2369                 data->phy_id = 32; /* Internal PHY */
2370                 return 0;
2371
2372         case SIOCGMIIREG:
2373                 data->val_out = rtl_readphy(tp, data->reg_num & 0x1f);
2374                 return 0;
2375
2376         case SIOCSMIIREG:
2377                 rtl_writephy(tp, data->reg_num & 0x1f, data->val_in);
2378                 return 0;
2379         }
2380         return -EOPNOTSUPP;
2381 }
2382
2383 static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd)
2384 {
2385         return -EOPNOTSUPP;
2386 }
2387
2388 static const struct rtl_cfg_info {
2389         void (*hw_start)(struct net_device *);
2390         unsigned int region;
2391         unsigned int align;
2392         u16 intr_event;
2393         u16 napi_event;
2394         unsigned features;
2395         u8 default_ver;
2396 } rtl_cfg_infos [] = {
2397         [RTL_CFG_0] = {
2398                 .hw_start       = rtl_hw_start_8169,
2399                 .region         = 1,
2400                 .align          = 0,
2401                 .intr_event     = SYSErr | LinkChg | RxOverflow |
2402                                   RxFIFOOver | TxErr | TxOK | RxOK | RxErr,
2403                 .napi_event     = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow,
2404                 .features       = RTL_FEATURE_GMII,
2405                 .default_ver    = RTL_GIGA_MAC_VER_01,
2406         },
2407         [RTL_CFG_1] = {
2408                 .hw_start       = rtl_hw_start_8168,
2409                 .region         = 2,
2410                 .align          = 8,
2411                 .intr_event     = SYSErr | LinkChg | RxOverflow |
2412                                   TxErr | TxOK | RxOK | RxErr,
2413                 .napi_event     = TxErr | TxOK | RxOK | RxOverflow,
2414                 .features       = RTL_FEATURE_GMII | RTL_FEATURE_MSI,
2415                 .default_ver    = RTL_GIGA_MAC_VER_11,
2416         },
2417         [RTL_CFG_2] = {
2418                 .hw_start       = rtl_hw_start_8101,
2419                 .region         = 2,
2420                 .align          = 8,
2421                 .intr_event     = SYSErr | LinkChg | RxOverflow | PCSTimeout |
2422                                   RxFIFOOver | TxErr | TxOK | RxOK | RxErr,
2423                 .napi_event     = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow,
2424                 .features       = RTL_FEATURE_MSI,
2425                 .default_ver    = RTL_GIGA_MAC_VER_13,
2426         }
2427 };
2428
2429 /* Cfg9346_Unlock assumed. */
2430 static unsigned rtl_try_msi(struct pci_dev *pdev, void __iomem *ioaddr,
2431                             const struct rtl_cfg_info *cfg)
2432 {
2433         unsigned msi = 0;
2434         u8 cfg2;
2435
2436         cfg2 = RTL_R8(Config2) & ~MSIEnable;
2437         if (cfg->features & RTL_FEATURE_MSI) {
2438                 if (pci_enable_msi(pdev)) {
2439                         dev_info(&pdev->dev, "no MSI. Back to INTx.\n");
2440                 } else {
2441                         cfg2 |= MSIEnable;
2442                         msi = RTL_FEATURE_MSI;
2443                 }
2444         }
2445         RTL_W8(Config2, cfg2);
2446         return msi;
2447 }
2448
2449 static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp)
2450 {
2451         if (tp->features & RTL_FEATURE_MSI) {
2452                 pci_disable_msi(pdev);
2453                 tp->features &= ~RTL_FEATURE_MSI;
2454         }
2455 }
2456
2457 static const struct net_device_ops rtl8169_netdev_ops = {
2458         .ndo_open               = rtl8169_open,
2459         .ndo_stop               = rtl8169_close,
2460         .ndo_get_stats          = rtl8169_get_stats,
2461         .ndo_start_xmit         = rtl8169_start_xmit,
2462         .ndo_tx_timeout         = rtl8169_tx_timeout,
2463         .ndo_validate_addr      = eth_validate_addr,
2464         .ndo_change_mtu         = rtl8169_change_mtu,
2465         .ndo_set_mac_address    = rtl_set_mac_address,
2466         .ndo_do_ioctl           = rtl8169_ioctl,
2467         .ndo_set_multicast_list = rtl_set_rx_mode,
2468 #ifdef CONFIG_R8169_VLAN
2469         .ndo_vlan_rx_register   = rtl8169_vlan_rx_register,
2470 #endif
2471 #ifdef CONFIG_NET_POLL_CONTROLLER
2472         .ndo_poll_controller    = rtl8169_netpoll,
2473 #endif
2474
2475 };
2476
2477 static int __devinit
2478 rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2479 {
2480         const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
2481         const unsigned int region = cfg->region;
2482         struct rtl8169_private *tp;
2483         struct mii_if_info *mii;
2484         struct net_device *dev;
2485         void __iomem *ioaddr;
2486         unsigned int i;
2487         int rc;
2488
2489         if (netif_msg_drv(&debug)) {
2490                 printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
2491                        MODULENAME, RTL8169_VERSION);
2492         }
2493
2494         dev = alloc_etherdev(sizeof (*tp));
2495         if (!dev) {
2496                 if (netif_msg_drv(&debug))
2497                         dev_err(&pdev->dev, "unable to alloc new ethernet\n");
2498                 rc = -ENOMEM;
2499                 goto out;
2500         }
2501
2502         SET_NETDEV_DEV(dev, &pdev->dev);
2503         dev->netdev_ops = &rtl8169_netdev_ops;
2504         tp = netdev_priv(dev);
2505         tp->dev = dev;
2506         tp->pci_dev = pdev;
2507         tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
2508
2509         mii = &tp->mii;
2510         mii->dev = dev;
2511         mii->mdio_read = rtl_mdio_read;
2512         mii->mdio_write = rtl_mdio_write;
2513         mii->phy_id_mask = 0x1f;
2514         mii->reg_num_mask = 0x1f;
2515         mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
2516
2517         /* enable device (incl. PCI PM wakeup and hotplug setup) */
2518         rc = pci_enable_device(pdev);
2519         if (rc < 0) {
2520                 netif_err(tp, probe, dev, "enable failure\n");
2521                 goto err_out_free_dev_1;
2522         }
2523
2524         if (pci_set_mwi(pdev) < 0)
2525                 netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n");
2526
2527         /* make sure the selected PCI region is MMIO */
2528         if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
2529                 netif_err(tp, probe, dev,
2530                           "region #%d not an MMIO resource, aborting\n",
2531                           region);
2532                 rc = -ENODEV;
2533                 goto err_out_mwi_2;
2534         }
2535
2536         /* check for weird/broken PCI region reporting */
2537         if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
2538                 netif_err(tp, probe, dev,
2539                           "Invalid PCI region size(s), aborting\n");
2540                 rc = -ENODEV;
2541                 goto err_out_mwi_2;
2542         }
2543
2544         rc = pci_request_regions(pdev, MODULENAME);
2545         if (rc < 0) {
2546                 netif_err(tp, probe, dev, "could not request regions\n");
2547                 goto err_out_mwi_2;
2548         }
2549
2550         tp->cp_cmd = PCIMulRW | RxChkSum;
2551
2552         if ((sizeof(dma_addr_t) > 4) &&
2553             !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
2554                 tp->cp_cmd |= PCIDAC;
2555                 dev->features |= NETIF_F_HIGHDMA;
2556         } else {
2557                 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2558                 if (rc < 0) {
2559                         netif_err(tp, probe, dev, "DMA configuration failed\n");
2560                         goto err_out_free_res_3;
2561                 }
2562         }
2563
2564         /* ioremap MMIO region */
2565         ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
2566         if (!ioaddr) {
2567                 netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
2568                 rc = -EIO;
2569                 goto err_out_free_res_3;
2570         }
2571
2572         tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
2573         if (!tp->pcie_cap)
2574                 netif_info(tp, probe, dev, "no PCI Express capability\n");
2575
2576         RTL_W16(IntrMask, 0x0000);
2577
2578         /* Soft reset the chip. */
2579         RTL_W8(ChipCmd, CmdReset);
2580
2581         /* Check that the chip has finished the reset. */
2582         for (i = 0; i < 100; i++) {
2583                 if ((RTL_R8(ChipCmd) & CmdReset) == 0)
2584                         break;
2585                 msleep_interruptible(1);
2586         }
2587
2588         RTL_W16(IntrStatus, 0xffff);
2589
2590         pci_set_master(pdev);
2591
2592         /* Identify chip attached to board */
2593         rtl8169_get_mac_version(tp, ioaddr);
2594
2595         /* Use appropriate default if unknown */
2596         if (tp->mac_version == RTL_GIGA_MAC_NONE) {
2597                 netif_notice(tp, probe, dev,
2598                              "unknown MAC, using family default\n");
2599                 tp->mac_version = cfg->default_ver;
2600         }
2601
2602         rtl8169_print_mac_version(tp);
2603
2604         for (i = 0; i < ARRAY_SIZE(rtl_chip_info); i++) {
2605                 if (tp->mac_version == rtl_chip_info[i].mac_version)
2606                         break;
2607         }
2608         if (i == ARRAY_SIZE(rtl_chip_info)) {
2609                 dev_err(&pdev->dev,
2610                         "driver bug, MAC version not found in rtl_chip_info\n");
2611                 goto err_out_msi_4;
2612         }
2613         tp->chipset = i;
2614
2615         RTL_W8(Cfg9346, Cfg9346_Unlock);
2616         RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
2617         RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
2618         if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0)
2619                 tp->features |= RTL_FEATURE_WOL;
2620         if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
2621                 tp->features |= RTL_FEATURE_WOL;
2622         tp->features |= rtl_try_msi(pdev, ioaddr, cfg);
2623         RTL_W8(Cfg9346, Cfg9346_Lock);
2624
2625         if ((tp->mac_version <= RTL_GIGA_MAC_VER_06) &&
2626             (RTL_R8(PHYstatus) & TBI_Enable)) {
2627                 tp->set_speed = rtl8169_set_speed_tbi;
2628                 tp->get_settings = rtl8169_gset_tbi;
2629                 tp->phy_reset_enable = rtl8169_tbi_reset_enable;
2630                 tp->phy_reset_pending = rtl8169_tbi_reset_pending;
2631                 tp->link_ok = rtl8169_tbi_link_ok;
2632                 tp->do_ioctl = rtl_tbi_ioctl;
2633
2634                 tp->phy_1000_ctrl_reg = ADVERTISE_1000FULL; /* Implied by TBI */
2635         } else {
2636                 tp->set_speed = rtl8169_set_speed_xmii;
2637                 tp->get_settings = rtl8169_gset_xmii;
2638                 tp->phy_reset_enable = rtl8169_xmii_reset_enable;
2639                 tp->phy_reset_pending = rtl8169_xmii_reset_pending;
2640                 tp->link_ok = rtl8169_xmii_link_ok;
2641                 tp->do_ioctl = rtl_xmii_ioctl;
2642         }
2643
2644         spin_lock_init(&tp->lock);
2645
2646         tp->mmio_addr = ioaddr;
2647
2648         /* Get MAC address */
2649         for (i = 0; i < MAC_ADDR_LEN; i++)
2650                 dev->dev_addr[i] = RTL_R8(MAC0 + i);
2651         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
2652
2653         SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
2654         dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
2655         dev->irq = pdev->irq;
2656         dev->base_addr = (unsigned long) ioaddr;
2657
2658         netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);
2659
2660 #ifdef CONFIG_R8169_VLAN
2661         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2662 #endif
2663         dev->features |= NETIF_F_GRO;
2664
2665         tp->intr_mask = 0xffff;
2666         tp->hw_start = cfg->hw_start;
2667         tp->intr_event = cfg->intr_event;
2668         tp->napi_event = cfg->napi_event;
2669
2670         init_timer(&tp->timer);
2671         tp->timer.data = (unsigned long) dev;
2672         tp->timer.function = rtl8169_phy_timer;
2673
2674         rc = register_netdev(dev);
2675         if (rc < 0)
2676                 goto err_out_msi_4;
2677
2678         pci_set_drvdata(pdev, dev);
2679
2680         netif_info(tp, probe, dev, "%s at 0x%lx, %pM, XID %08x IRQ %d\n",
2681                    rtl_chip_info[tp->chipset].name,
2682                    dev->base_addr, dev->dev_addr,
2683                    (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), dev->irq);
2684
2685         rtl8169_init_phy(dev, tp);
2686
2687         /*
2688          * Pretend we are using VLANs; this bypasses a nasty bug where
2689          * interrupts stop flowing under high load on 8110SCd controllers.
2690          */
2691         if (tp->mac_version == RTL_GIGA_MAC_VER_05)
2692                 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | RxVlan);
2693
2694         device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
2695
2696         if (pci_dev_run_wake(pdev))
2697                 pm_runtime_put_noidle(&pdev->dev);
2698
2699 out:
2700         return rc;
2701
2702 err_out_msi_4:
2703         rtl_disable_msi(pdev, tp);
2704         iounmap(ioaddr);
2705 err_out_free_res_3:
2706         pci_release_regions(pdev);
2707 err_out_mwi_2:
2708         pci_clear_mwi(pdev);
2709         pci_disable_device(pdev);
2710 err_out_free_dev_1:
2711         free_netdev(dev);
2712         goto out;
2713 }
2714
2715 static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
2716 {
2717         struct net_device *dev = pci_get_drvdata(pdev);
2718         struct rtl8169_private *tp = netdev_priv(dev);
2719
2720         cancel_delayed_work_sync(&tp->task);
2721
2722         unregister_netdev(dev);
2723
2724         if (pci_dev_run_wake(pdev))
2725                 pm_runtime_get_noresume(&pdev->dev);
2726
2727         /* restore original MAC address */
2728         rtl_rar_set(tp, dev->perm_addr);
2729
2730         rtl_disable_msi(pdev, tp);
2731         rtl8169_release_board(pdev, dev, tp->mmio_addr);
2732         pci_set_drvdata(pdev, NULL);
2733 }
2734
2735 static int rtl8169_open(struct net_device *dev)
2736 {
2737         struct rtl8169_private *tp = netdev_priv(dev);
2738         struct pci_dev *pdev = tp->pci_dev;
2739         int retval = -ENOMEM;
2740
2741         pm_runtime_get_sync(&pdev->dev);
2742
2743         /*
2744          * Rx and Tx descriptors need 256-byte alignment.
2745          * dma_alloc_coherent provides more.
2746          */
2747         tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
2748                                              &tp->TxPhyAddr, GFP_KERNEL);
2749         if (!tp->TxDescArray)
2750                 goto err_pm_runtime_put;
2751
2752         tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES,
2753                                              &tp->RxPhyAddr, GFP_KERNEL);
2754         if (!tp->RxDescArray)
2755                 goto err_free_tx_0;
2756
2757         retval = rtl8169_init_ring(dev);
2758         if (retval < 0)
2759                 goto err_free_rx_1;
2760
2761         INIT_DELAYED_WORK(&tp->task, NULL);
2762
2763         smp_mb();
2764
2765         retval = request_irq(dev->irq, rtl8169_interrupt,
2766                              (tp->features & RTL_FEATURE_MSI) ? 0 : IRQF_SHARED,
2767                              dev->name, dev);
2768         if (retval < 0)
2769                 goto err_release_ring_2;
2770
2771         napi_enable(&tp->napi);
2772
2773         rtl_hw_start(dev);
2774
2775         rtl8169_request_timer(dev);
2776
2777         tp->saved_wolopts = 0;
2778         pm_runtime_put_noidle(&pdev->dev);
2779
2780         rtl8169_check_link_status(dev, tp, tp->mmio_addr);
2781 out:
2782         return retval;
2783
2784 err_release_ring_2:
2785         rtl8169_rx_clear(tp);
2786 err_free_rx_1:
2787         dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
2788                           tp->RxPhyAddr);
2789         tp->RxDescArray = NULL;
2790 err_free_tx_0:
2791         dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
2792                           tp->TxPhyAddr);
2793         tp->TxDescArray = NULL;
2794 err_pm_runtime_put:
2795         pm_runtime_put_noidle(&pdev->dev);
2796         goto out;
2797 }
2798
2799 static void rtl8169_hw_reset(void __iomem *ioaddr)
2800 {
2801         /* Disable interrupts */
2802         rtl8169_irq_mask_and_ack(ioaddr);
2803
2804         /* Reset the chipset */
2805         RTL_W8(ChipCmd, CmdReset);
2806
2807         /* PCI commit */
2808         RTL_R8(ChipCmd);
2809 }
2810
2811 static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
2812 {
2813         void __iomem *ioaddr = tp->mmio_addr;
2814         u32 cfg = rtl8169_rx_config;
2815
2816         cfg |= (RTL_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);
2817         RTL_W32(RxConfig, cfg);
2818
2819         /* Set DMA burst size and Interframe Gap Time */
2820         RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
2821                 (InterFrameGap << TxInterFrameGapShift));
2822 }
2823
2824 static void rtl_hw_start(struct net_device *dev)
2825 {
2826         struct rtl8169_private *tp = netdev_priv(dev);
2827         void __iomem *ioaddr = tp->mmio_addr;
2828         unsigned int i;
2829
2830         /* Soft reset the chip. */
2831         RTL_W8(ChipCmd, CmdReset);
2832
2833         /* Check that the chip has finished the reset. */
2834         for (i = 0; i < 100; i++) {
2835                 if ((RTL_R8(ChipCmd) & CmdReset) == 0)
2836                         break;
2837                 msleep_interruptible(1);
2838         }
2839
2840         tp->hw_start(dev);
2841
2842         netif_start_queue(dev);
2843 }
2844
2845
2846 static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp,
2847                                          void __iomem *ioaddr)
2848 {
2849         /*
2850          * Magic spell: some iop3xx ARM boards need the TxDescAddrHigh
2851          * register to be written before TxDescAddrLow in order to work.
2852          * Switching from MMIO to I/O access fixes the issue as well.
2853          */
2854         RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32);
2855         RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32));
2856         RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32);
2857         RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32));
2858 }
2859
2860 static u16 rtl_rw_cpluscmd(void __iomem *ioaddr)
2861 {
2862         u16 cmd;
2863
2864         cmd = RTL_R16(CPlusCmd);
2865         RTL_W16(CPlusCmd, cmd);
2866         return cmd;
2867 }
2868
2869 static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz)
2870 {
2871         /* Low hurts. Let's disable the filtering. */
2872         RTL_W16(RxMaxSize, rx_buf_sz + 1);
2873 }
2874
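/*
 * Write a chip- and PCI-clock-dependent magic value to the undocumented
 * register at offset 0x7c (8110SCd/SCe only).
 */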
2875 static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
2876 {
2877         static const struct {
2878                 u32 mac_version;
2879                 u32 clk;
2880                 u32 val;
2881         } cfg2_info [] = {
2882                 { RTL_GIGA_MAC_VER_05, PCI_Clock_33MHz, 0x000fff00 }, // 8110SCd
2883                 { RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff },
2884                 { RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe
2885                 { RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff }
2886         }, *p = cfg2_info;
2887         unsigned int i;
2888         u32 clk;
2889
2890         clk = RTL_R8(Config2) & PCI_Clock_66MHz;
2891         for (i = 0; i < ARRAY_SIZE(cfg2_info); i++, p++) {
2892                 if ((p->mac_version == mac_version) && (p->clk == clk)) {
2893                         RTL_W32(0x7c, p->val);
2894                         break;
2895                 }
2896         }
2897 }
2898
2899 static void rtl_hw_start_8169(struct net_device *dev)
2900 {
2901         struct rtl8169_private *tp = netdev_priv(dev);
2902         void __iomem *ioaddr = tp->mmio_addr;
2903         struct pci_dev *pdev = tp->pci_dev;
2904
2905         if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
2906                 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | PCIMulRW);
2907                 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
2908         }
2909
2910         RTL_W8(Cfg9346, Cfg9346_Unlock);
2911         if ((tp->mac_version == RTL_GIGA_MAC_VER_01) ||
2912             (tp->mac_version == RTL_GIGA_MAC_VER_02) ||
2913             (tp->mac_version == RTL_GIGA_MAC_VER_03) ||
2914             (tp->mac_version == RTL_GIGA_MAC_VER_04))
2915                 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
2916
2917         RTL_W8(EarlyTxThres, NoEarlyTx);
2918
2919         rtl_set_rx_max_size(ioaddr, rx_buf_sz);
2920
2921         if ((tp->mac_version == RTL_GIGA_MAC_VER_01) ||
2922             (tp->mac_version == RTL_GIGA_MAC_VER_02) ||
2923             (tp->mac_version == RTL_GIGA_MAC_VER_03) ||
2924             (tp->mac_version == RTL_GIGA_MAC_VER_04))
2925                 rtl_set_rx_tx_config_registers(tp);
2926
2927         tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;
2928
2929         if ((tp->mac_version == RTL_GIGA_MAC_VER_02) ||
2930             (tp->mac_version == RTL_GIGA_MAC_VER_03)) {
2931                 dprintk("Set MAC Reg C+CR Offset 0xE0. "
2932                         "Bit-3 and bit-14 MUST be 1\n");
2933                 tp->cp_cmd |= (1 << 14);
2934         }
2935
2936         RTL_W16(CPlusCmd, tp->cp_cmd);
2937
2938         rtl8169_set_magic_reg(ioaddr, tp->mac_version);
2939
2940         /*
2941          * Undocumented corner. Supposedly:
2942          * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets
2943          */
2944         RTL_W16(IntrMitigate, 0x0000);
2945
2946         rtl_set_rx_tx_desc_registers(tp, ioaddr);
2947
2948         if ((tp->mac_version != RTL_GIGA_MAC_VER_01) &&
2949             (tp->mac_version != RTL_GIGA_MAC_VER_02) &&
2950             (tp->mac_version != RTL_GIGA_MAC_VER_03) &&
2951             (tp->mac_version != RTL_GIGA_MAC_VER_04)) {
2952                 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
2953                 rtl_set_rx_tx_config_registers(tp);
2954         }
2955
2956         RTL_W8(Cfg9346, Cfg9346_Lock);
2957
2958         /* Initially a 10 us delay. Turned it into a PCI commit. - FR */
2959         RTL_R8(IntrMask);
2960
2961         RTL_W32(RxMissed, 0);
2962
2963         rtl_set_rx_mode(dev);
2964
2965         /* no early-rx interrupts */
2966         RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
2967
2968         /* Enable all known interrupts by setting the interrupt mask. */
2969         RTL_W16(IntrMask, tp->intr_event);
2970 }
2971
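/*
 * Adjust the PCI Express Max_Read_Request_Size field (plus any extra bits
 * passed in 'force') in the Device Control register.
 */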
2972 static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
2973 {
2974         struct net_device *dev = pci_get_drvdata(pdev);
2975         struct rtl8169_private *tp = netdev_priv(dev);
2976         int cap = tp->pcie_cap;
2977
2978         if (cap) {
2979                 u16 ctl;
2980
2981                 pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl);
2982                 ctl = (ctl & ~PCI_EXP_DEVCTL_READRQ) | force;
2983                 pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl);
2984         }
2985 }
2986
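/*
 * Enable CSI access by forcing the top byte of the register at CSI
 * address 0x070c to 0x27; the hw_start helpers below rely on this.
 */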
2987 static void rtl_csi_access_enable(void __iomem *ioaddr)
2988 {
2989         u32 csi;
2990
2991         csi = rtl_csi_read(ioaddr, 0x070c) & 0x00ffffff;
2992         rtl_csi_write(ioaddr, 0x070c, csi | 0x27000000);
2993 }
2994
2995 struct ephy_info {
2996         unsigned int offset;
2997         u16 mask;
2998         u16 bits;
2999 };
3000
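/*
 * Read-modify-write a table of ephy registers: clear the bits in 'mask',
 * then set 'bits'.
 */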
3001 static void rtl_ephy_init(void __iomem *ioaddr, const struct ephy_info *e, int len)
3002 {
3003         u16 w;
3004
3005         while (len-- > 0) {
3006                 w = (rtl_ephy_read(ioaddr, e->offset) & ~e->mask) | e->bits;
3007                 rtl_ephy_write(ioaddr, e->offset, w);
3008                 e++;
3009         }
3010 }
3011
3012 static void rtl_disable_clock_request(struct pci_dev *pdev)
3013 {
3014         struct net_device *dev = pci_get_drvdata(pdev);
3015         struct rtl8169_private *tp = netdev_priv(dev);
3016         int cap = tp->pcie_cap;
3017
3018         if (cap) {
3019                 u16 ctl;
3020
3021                 pci_read_config_word(pdev, cap + PCI_EXP_LNKCTL, &ctl);
3022                 ctl &= ~PCI_EXP_LNKCTL_CLKREQ_EN;
3023                 pci_write_config_word(pdev, cap + PCI_EXP_LNKCTL, ctl);
3024         }
3025 }
3026
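/* C+ Command register bits that the 8168 start-up helpers always clear. */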
3027 #define R8168_CPCMD_QUIRK_MASK (\
3028         EnableBist | \
3029         Mac_dbgo_oe | \
3030         Force_half_dup | \
3031         Force_rxflow_en | \
3032         Force_txflow_en | \
3033         Cxpl_dbg_sel | \
3034         ASF | \
3035         PktCntrDisable | \
3036         Mac_dbgo_sel)
3037
3038 static void rtl_hw_start_8168bb(void __iomem *ioaddr, struct pci_dev *pdev)
3039 {
3040         RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
3041
3042         RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
3043
3044         rtl_tx_performance_tweak(pdev,
3045                 (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
3046 }
3047
3048 static void rtl_hw_start_8168bef(void __iomem *ioaddr, struct pci_dev *pdev)
3049 {
3050         rtl_hw_start_8168bb(ioaddr, pdev);
3051
3052         RTL_W8(MaxTxPacketSize, TxPacketMax);
3053
3054         RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
3055 }
3056
3057 static void __rtl_hw_start_8168cp(void __iomem *ioaddr, struct pci_dev *pdev)
3058 {
3059         RTL_W8(Config1, RTL_R8(Config1) | Speed_down);
3060
3061         RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
3062
3063         rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
3064
3065         rtl_disable_clock_request(pdev);
3066
3067         RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
3068 }
3069
3070 static void rtl_hw_start_8168cp_1(void __iomem *ioaddr, struct pci_dev *pdev)
3071 {
3072         static const struct ephy_info e_info_8168cp[] = {
3073                 { 0x01, 0,      0x0001 },
3074                 { 0x02, 0x0800, 0x1000 },
3075                 { 0x03, 0,      0x0042 },
3076                 { 0x06, 0x0080, 0x0000 },
3077                 { 0x07, 0,      0x2000 }
3078         };
3079
3080         rtl_csi_access_enable(ioaddr);
3081
3082         rtl_ephy_init(ioaddr, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));
3083
3084         __rtl_hw_start_8168cp(ioaddr, pdev);
3085 }
3086
3087 static void rtl_hw_start_8168cp_2(void __iomem *ioaddr, struct pci_dev *pdev)
3088 {
3089         rtl_csi_access_enable(ioaddr);
3090
3091         RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
3092
3093         rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
3094
3095         RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
3096 }
3097
3098 static void rtl_hw_start_8168cp_3(void __iomem *ioaddr, struct pci_dev *pdev)
3099 {
3100         rtl_csi_access_enable(ioaddr);
3101
3102         RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
3103
3104         /* Magic. */
3105         RTL_W8(DBG_REG, 0x20);
3106
3107         RTL_W8(MaxTxPacketSize, TxPacketMax);
3108
3109         rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
3110
3111         RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
3112 }
3113
3114 static void rtl_hw_start_8168c_1(void __iomem *ioaddr, struct pci_dev *pdev)
3115 {
3116         static const struct ephy_info e_info_8168c_1[] = {
3117                 { 0x02, 0x0800, 0x1000 },
3118                 { 0x03, 0,      0x0002 },
3119                 { 0x06, 0x0080, 0x0000 }
3120         };
3121
3122         rtl_csi_access_enable(ioaddr);
3123
3124         RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);
3125
3126         rtl_ephy_init(ioaddr, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1));
3127
3128         __rtl_hw_start_8168cp(ioaddr, pdev);
3129 }
3130
3131 static void rtl_hw_start_8168c_2(void __iomem *ioaddr, struct pci_dev *pdev)
3132 {
3133         static const struct ephy_info e_info_8168c_2[] = {
3134                 { 0x01, 0,      0x0001 },
3135                 { 0x03, 0x0400, 0x0220 }
3136         };
3137
3138         rtl_csi_access_enable(ioaddr);
3139
3140         rtl_ephy_init(ioaddr, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));
3141
3142         __rtl_hw_start_8168cp(ioaddr, pdev);
3143 }
3144
3145 static void rtl_hw_start_8168c_3(void __iomem *ioaddr, struct pci_dev *pdev)
3146 {
3147         rtl_hw_start_8168c_2(ioaddr, pdev);
3148 }
3149
3150 static void rtl_hw_start_8168c_4(void __iomem *ioaddr, struct pci_dev *pdev)
3151 {
3152         rtl_csi_access_enable(ioaddr);
3153
3154         __rtl_hw_start_8168cp(ioaddr, pdev);
3155 }
3156
3157 static void rtl_hw_start_8168d(void __iomem *ioaddr, struct pci_dev *pdev)
3158 {
3159         rtl_csi_access_enable(ioaddr);
3160
3161         rtl_disable_clock_request(pdev);
3162
3163         RTL_W8(MaxTxPacketSize, TxPacketMax);
3164
3165         rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
3166
3167         RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
3168 }
3169
3170 static void rtl_hw_start_8168(struct net_device *dev)
3171 {
3172         struct rtl8169_private *tp = netdev_priv(dev);
3173         void __iomem *ioaddr = tp->mmio_addr;
3174         struct pci_dev *pdev = tp->pci_dev;
3175
3176         RTL_W8(Cfg9346, Cfg9346_Unlock);
3177
3178         RTL_W8(MaxTxPacketSize, TxPacketMax);
3179
3180         rtl_set_rx_max_size(ioaddr, rx_buf_sz);
3181
3182         tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1;
3183
3184         RTL_W16(CPlusCmd, tp->cp_cmd);
3185
3186         RTL_W16(IntrMitigate, 0x5151);
3187
3188         /* Workaround for RxFIFO overflow. */
3189         if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
3190                 tp->intr_event |= RxFIFOOver | PCSTimeout;
3191                 tp->intr_event &= ~RxOverflow;
3192         }
3193
3194         rtl_set_rx_tx_desc_registers(tp, ioaddr);
3195
3196         rtl_set_rx_mode(dev);
3197
3198         RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
3199                 (InterFrameGap << TxInterFrameGapShift));
3200
3201         RTL_R8(IntrMask);
3202
3203         switch (tp->mac_version) {
3204         case RTL_GIGA_MAC_VER_11:
3205                 rtl_hw_start_8168bb(ioaddr, pdev);
3206         break;
3207
3208         case RTL_GIGA_MAC_VER_12:
3209         case RTL_GIGA_MAC_VER_17:
3210                 rtl_hw_start_8168bef(ioaddr, pdev);
3211         break;
3212
3213         case RTL_GIGA_MAC_VER_18:
3214                 rtl_hw_start_8168cp_1(ioaddr, pdev);
3215         break;
3216
3217         case RTL_GIGA_MAC_VER_19:
3218                 rtl_hw_start_8168c_1(ioaddr, pdev);
3219         break;
3220
3221         case RTL_GIGA_MAC_VER_20:
3222                 rtl_hw_start_8168c_2(ioaddr, pdev);
3223         break;
3224
3225         case RTL_GIGA_MAC_VER_21:
3226                 rtl_hw_start_8168c_3(ioaddr, pdev);
3227         break;
3228
3229         case RTL_GIGA_MAC_VER_22:
3230                 rtl_hw_start_8168c_4(ioaddr, pdev);
3231         break;
3232
3233         case RTL_GIGA_MAC_VER_23:
3234                 rtl_hw_start_8168cp_2(ioaddr, pdev);
3235         break;
3236
3237         case RTL_GIGA_MAC_VER_24:
3238                 rtl_hw_start_8168cp_3(ioaddr, pdev);
3239         break;
3240
3241         case RTL_GIGA_MAC_VER_25:
3242         case RTL_GIGA_MAC_VER_26:
3243         case RTL_GIGA_MAC_VER_27:
3244                 rtl_hw_start_8168d(ioaddr, pdev);
3245         break;
3246
3247         default:
3248                 printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
3249                         dev->name, tp->mac_version);
3250         break;
3251         }
3252
3253         RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
3254
3255         RTL_W8(Cfg9346, Cfg9346_Lock);
3256
3257         RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
3258
3259         RTL_W16(IntrMask, tp->intr_event);
3260 }
3261
3262 #define R810X_CPCMD_QUIRK_MASK (\
3263         EnableBist | \
3264         Mac_dbgo_oe | \
3265         Force_half_dup | \
3266         Force_rxflow_en | \
3267         Force_txflow_en | \
3268         Cxpl_dbg_sel | \
3269         ASF | \
3270         PktCntrDisable | \
3271         PCIDAC | \
3272         PCIMulRW)
3273
3274 static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev)
3275 {
3276         static const struct ephy_info e_info_8102e_1[] = {
3277                 { 0x01, 0, 0x6e65 },
3278                 { 0x02, 0, 0x091f },
3279                 { 0x03, 0, 0xc2f9 },
3280                 { 0x06, 0, 0xafb5 },
3281                 { 0x07, 0, 0x0e00 },
3282                 { 0x19, 0, 0xec80 },
3283                 { 0x01, 0, 0x2e65 },
3284                 { 0x01, 0, 0x6e65 }
3285         };
3286         u8 cfg1;
3287
3288         rtl_csi_access_enable(ioaddr);
3289
3290         RTL_W8(DBG_REG, FIX_NAK_1);
3291
3292         rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
3293
3294         RTL_W8(Config1,
3295                LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable);
3296         RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
3297
3298         cfg1 = RTL_R8(Config1);
3299         if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
3300                 RTL_W8(Config1, cfg1 & ~LEDS0);
3301
3302         RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK);
3303
3304         rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
3305 }
3306
3307 static void rtl_hw_start_8102e_2(void __iomem *ioaddr, struct pci_dev *pdev)
3308 {
3309         rtl_csi_access_enable(ioaddr);
3310
3311         rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
3312
3313         RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable);
3314         RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
3315
3316         RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK);
3317 }
3318
3319 static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev)
3320 {
3321         rtl_hw_start_8102e_2(ioaddr, pdev);
3322
3323         rtl_ephy_write(ioaddr, 0x03, 0xc2f9);
3324 }
3325
3326 static void rtl_hw_start_8101(struct net_device *dev)
3327 {
3328         struct rtl8169_private *tp = netdev_priv(dev);
3329         void __iomem *ioaddr = tp->mmio_addr;
3330         struct pci_dev *pdev = tp->pci_dev;
3331
3332         if ((tp->mac_version == RTL_GIGA_MAC_VER_13) ||
3333             (tp->mac_version == RTL_GIGA_MAC_VER_16)) {
3334                 int cap = tp->pcie_cap;
3335
3336                 if (cap) {
3337                         pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL,
3338                                               PCI_EXP_DEVCTL_NOSNOOP_EN);
3339                 }
3340         }
3341
3342         switch (tp->mac_version) {
3343         case RTL_GIGA_MAC_VER_07:
3344                 rtl_hw_start_8102e_1(ioaddr, pdev);
3345                 break;
3346
3347         case RTL_GIGA_MAC_VER_08:
3348                 rtl_hw_start_8102e_3(ioaddr, pdev);
3349                 break;
3350
3351         case RTL_GIGA_MAC_VER_09:
3352                 rtl_hw_start_8102e_2(ioaddr, pdev);
3353                 break;
3354         }
3355
3356         RTL_W8(Cfg9346, Cfg9346_Unlock);
3357
3358         RTL_W8(MaxTxPacketSize, TxPacketMax);
3359
3360         rtl_set_rx_max_size(ioaddr, rx_buf_sz);
3361
3362         tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;
3363
3364         RTL_W16(CPlusCmd, tp->cp_cmd);
3365
3366         RTL_W16(IntrMitigate, 0x0000);
3367
3368         rtl_set_rx_tx_desc_registers(tp, ioaddr);
3369
3370         RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
3371         rtl_set_rx_tx_config_registers(tp);
3372
3373         RTL_W8(Cfg9346, Cfg9346_Lock);
3374
3375         RTL_R8(IntrMask);
3376
3377         rtl_set_rx_mode(dev);
3378
3379         RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
3380
3381         RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
3382
3383         RTL_W16(IntrMask, tp->intr_event);
3384 }
3385
3386 static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
3387 {
3388         if (new_mtu < ETH_ZLEN || new_mtu > SafeMtu)
3389                 return -EINVAL;
3390
3391         dev->mtu = new_mtu;
3392         return 0;
3393 }
3394
3395 static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
3396 {
3397         desc->addr = cpu_to_le64(0x0badbadbadbadbadull);
3398         desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
3399 }
3400
3401 static void rtl8169_free_rx_databuff(struct rtl8169_private *tp,
3402                                      void **data_buff, struct RxDesc *desc)
3403 {
3404         dma_unmap_single(&tp->pci_dev->dev, le64_to_cpu(desc->addr), rx_buf_sz,
3405                          DMA_FROM_DEVICE);
3406
3407         kfree(*data_buff);
3408         *data_buff = NULL;
3409         rtl8169_make_unusable_by_asic(desc);
3410 }
3411
3412 static inline void rtl8169_mark_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
3413 {
3414         u32 eor = le32_to_cpu(desc->opts1) & RingEnd;
3415
3416         desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz);
3417 }
3418
3419 static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
3420                                        u32 rx_buf_sz)
3421 {
3422         desc->addr = cpu_to_le64(mapping);
3423         wmb();
3424         rtl8169_mark_to_asic(desc, rx_buf_sz);
3425 }
3426
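     /*
      * Rx data buffers are handed to the chip on a 16 byte boundary; the
      * allocator below retries with 15 spare bytes when kmalloc_node()
      * happens to return an unaligned block.
      */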
3427 static inline void *rtl8169_align(void *data)
3428 {
3429         return (void *)ALIGN((long)data, 16);
3430 }
3431
3432 static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
3433                                              struct RxDesc *desc)
3434 {
3435         void *data;
3436         dma_addr_t mapping;
3437         struct device *d = &tp->pci_dev->dev;
3438         struct net_device *dev = tp->dev;
3439         int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
3440
3441         data = kmalloc_node(rx_buf_sz, GFP_KERNEL, node);
3442         if (!data)
3443                 return NULL;
3444
3445         if (rtl8169_align(data) != data) {
3446                 kfree(data);
3447                 data = kmalloc_node(rx_buf_sz + 15, GFP_KERNEL, node);
3448                 if (!data)
3449                         return NULL;
3450         }
3451
3452         mapping = dma_map_single(d, rtl8169_align(data), rx_buf_sz,
3453                                  DMA_FROM_DEVICE);
3454         if (unlikely(dma_mapping_error(d, mapping))) {
3455                 if (net_ratelimit())
3456                         netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n");
3457                 goto err_out;
3458         }
3459
3460         rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
3461         return data;
3462
3463 err_out:
3464         kfree(data);
3465         return NULL;
3466 }
3467
3468 static void rtl8169_rx_clear(struct rtl8169_private *tp)
3469 {
3470         unsigned int i;
3471
3472         for (i = 0; i < NUM_RX_DESC; i++) {
3473                 if (tp->Rx_databuff[i]) {
3474                         rtl8169_free_rx_databuff(tp, tp->Rx_databuff + i,
3475                                             tp->RxDescArray + i);
3476                 }
3477         }
3478 }
3479
3480 static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
3481 {
3482         desc->opts1 |= cpu_to_le32(RingEnd);
3483 }
3484
3485 static int rtl8169_rx_fill(struct rtl8169_private *tp)
3486 {
3487         unsigned int i;
3488
3489         for (i = 0; i < NUM_RX_DESC; i++) {
3490                 void *data;
3491
3492                 if (tp->Rx_databuff[i])
3493                         continue;
3494
3495                 data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i);
3496                 if (!data) {
3497                         rtl8169_make_unusable_by_asic(tp->RxDescArray + i);
3498                         goto err_out;
3499                 }
3500                 tp->Rx_databuff[i] = data;
3501         }
3502
3503         rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
3504         return 0;
3505
3506 err_out:
3507         rtl8169_rx_clear(tp);
3508         return -ENOMEM;
3509 }
3510
3511 static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
3512 {
3513         tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
3514 }
3515
3516 static int rtl8169_init_ring(struct net_device *dev)
3517 {
3518         struct rtl8169_private *tp = netdev_priv(dev);
3519
3520         rtl8169_init_ring_indexes(tp);
3521
3522         memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
3523         memset(tp->Rx_databuff, 0x0, NUM_RX_DESC * sizeof(void *));
3524
3525         return rtl8169_rx_fill(tp);
3526 }
3527
3528 static void rtl8169_unmap_tx_skb(struct device *d, struct ring_info *tx_skb,
3529                                  struct TxDesc *desc)
3530 {
3531         unsigned int len = tx_skb->len;
3532
3533         dma_unmap_single(d, le64_to_cpu(desc->addr), len, DMA_TO_DEVICE);
3534
3535         desc->opts1 = 0x00;
3536         desc->opts2 = 0x00;
3537         desc->addr = 0x00;
3538         tx_skb->len = 0;
3539 }
3540
3541 static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
3542                                    unsigned int n)
3543 {
3544         unsigned int i;
3545
3546         for (i = 0; i < n; i++) {
3547                 unsigned int entry = (start + i) % NUM_TX_DESC;
3548                 struct ring_info *tx_skb = tp->tx_skb + entry;
3549                 unsigned int len = tx_skb->len;
3550
3551                 if (len) {
3552                         struct sk_buff *skb = tx_skb->skb;
3553
3554                         rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
3555                                              tp->TxDescArray + entry);
3556                         if (skb) {
3557                                 tp->dev->stats.tx_dropped++;
3558                                 dev_kfree_skb(skb);
3559                                 tx_skb->skb = NULL;
3560                         }
3561                 }
3562         }
3563 }
3564
3565 static void rtl8169_tx_clear(struct rtl8169_private *tp)
3566 {
3567         rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC);
3568         tp->cur_tx = tp->dirty_tx = 0;
3569 }
3570
3571 static void rtl8169_schedule_work(struct net_device *dev, work_func_t task)
3572 {
3573         struct rtl8169_private *tp = netdev_priv(dev);
3574
3575         PREPARE_DELAYED_WORK(&tp->task, task);
3576         schedule_delayed_work(&tp->task, 4);
3577 }
3578
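     /*
      * Quiesce the device before a reset/reinit task runs: drain any
      * in-flight irq handler and NAPI poll, ack and mask the chip
      * interrupts, then re-arm them and NAPI so the caller starts from
      * a clean state.
      */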
3579 static void rtl8169_wait_for_quiescence(struct net_device *dev)
3580 {
3581         struct rtl8169_private *tp = netdev_priv(dev);
3582         void __iomem *ioaddr = tp->mmio_addr;
3583
3584         synchronize_irq(dev->irq);
3585
3586         /* Wait for any pending NAPI task to complete */
3587         napi_disable(&tp->napi);
3588
3589         rtl8169_irq_mask_and_ack(ioaddr);
3590
3591         tp->intr_mask = 0xffff;
3592         RTL_W16(IntrMask, tp->intr_event);
3593         napi_enable(&tp->napi);
3594 }
3595
3596 static void rtl8169_reinit_task(struct work_struct *work)
3597 {
3598         struct rtl8169_private *tp =
3599                 container_of(work, struct rtl8169_private, task.work);
3600         struct net_device *dev = tp->dev;
3601         int ret;
3602
3603         rtnl_lock();
3604
3605         if (!netif_running(dev))
3606                 goto out_unlock;
3607
3608         rtl8169_wait_for_quiescence(dev);
3609         rtl8169_close(dev);
3610
3611         ret = rtl8169_open(dev);
3612         if (unlikely(ret < 0)) {
3613                 if (net_ratelimit())
3614                         netif_err(tp, drv, dev,
3615                                   "reinit failure (status = %d). Rescheduling\n",
3616                                   ret);
3617                 rtl8169_schedule_work(dev, rtl8169_reinit_task);
3618         }
3619
3620 out_unlock:
3621         rtnl_unlock();
3622 }
3623
3624 static void rtl8169_reset_task(struct work_struct *work)
3625 {
3626         struct rtl8169_private *tp =
3627                 container_of(work, struct rtl8169_private, task.work);
3628         struct net_device *dev = tp->dev;
3629
3630         rtnl_lock();
3631
3632         if (!netif_running(dev))
3633                 goto out_unlock;
3634
3635         rtl8169_wait_for_quiescence(dev);
3636
3637         rtl8169_rx_interrupt(dev, tp, tp->mmio_addr, ~(u32)0);
3638         rtl8169_tx_clear(tp);
3639
3640         if (tp->dirty_rx == tp->cur_rx) {
3641                 rtl8169_init_ring_indexes(tp);
3642                 rtl_hw_start(dev);
3643                 netif_wake_queue(dev);
3644                 rtl8169_check_link_status(dev, tp, tp->mmio_addr);
3645         } else {
3646                 if (net_ratelimit())
3647                         netif_emerg(tp, intr, dev, "Rx buffers shortage\n");
3648                 rtl8169_schedule_work(dev, rtl8169_reset_task);
3649         }
3650
3651 out_unlock:
3652         rtnl_unlock();
3653 }
3654
3655 static void rtl8169_tx_timeout(struct net_device *dev)
3656 {
3657         struct rtl8169_private *tp = netdev_priv(dev);
3658
3659         rtl8169_hw_reset(tp->mmio_addr);
3660
3661         /* Let's wait a bit while any in-flight (async) irq lands. */
3662         rtl8169_schedule_work(dev, rtl8169_reset_task);
3663 }
3664
3665 static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
3666                               u32 opts1)
3667 {
3668         struct skb_shared_info *info = skb_shinfo(skb);
3669         unsigned int cur_frag, entry;
3670         struct TxDesc * uninitialized_var(txd);
3671         struct device *d = &tp->pci_dev->dev;
3672
3673         entry = tp->cur_tx;
3674         for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
3675                 skb_frag_t *frag = info->frags + cur_frag;
3676                 dma_addr_t mapping;
3677                 u32 status, len;
3678                 void *addr;
3679
3680                 entry = (entry + 1) % NUM_TX_DESC;
3681
3682                 txd = tp->TxDescArray + entry;
3683                 len = frag->size;
3684                 addr = ((void *) page_address(frag->page)) + frag->page_offset;
3685                 mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE);
3686                 if (unlikely(dma_mapping_error(d, mapping))) {
3687                         if (net_ratelimit())
3688                                 netif_err(tp, drv, tp->dev,
3689                                           "Failed to map TX fragments DMA!\n");
3690                         goto err_out;
3691                 }
3692
3693                 /* anti gcc 2.95.3 bugware (sic) */
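                     /* i.e. set RingEnd only on the ring's last descriptor, branch-free. */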
3694                 status = opts1 | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
3695
3696                 txd->opts1 = cpu_to_le32(status);
3697                 txd->addr = cpu_to_le64(mapping);
3698
3699                 tp->tx_skb[entry].len = len;
3700         }
3701
3702         if (cur_frag) {
3703                 tp->tx_skb[entry].skb = skb;
3704                 txd->opts1 |= cpu_to_le32(LastFrag);
3705         }
3706
3707         return cur_frag;
3708
3709 err_out:
3710         rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag);
3711         return -EIO;
3712 }
3713
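     /*
      * Compute the offload bits for a Tx descriptor's opts1 word:
      * LargeSend plus the MSS for GSO frames, hardware IP/TCP/UDP checksum
      * flags for CHECKSUM_PARTIAL skbs, or 0 when nothing is offloaded.
      */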
3714 static inline u32 rtl8169_tso_csum(struct sk_buff *skb, struct net_device *dev)
3715 {
3716         if (dev->features & NETIF_F_TSO) {
3717                 u32 mss = skb_shinfo(skb)->gso_size;
3718
3719                 if (mss)
3720                         return LargeSend | ((mss & MSSMask) << MSSShift);
3721         }
3722         if (skb->ip_summed == CHECKSUM_PARTIAL) {
3723                 const struct iphdr *ip = ip_hdr(skb);
3724
3725                 if (ip->protocol == IPPROTO_TCP)
3726                         return IPCS | TCPCS;
3727                 else if (ip->protocol == IPPROTO_UDP)
3728                         return IPCS | UDPCS;
3729                 WARN_ON(1);     /* we need a WARN() */
3730         }
3731         return 0;
3732 }
3733
3734 static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
3735                                       struct net_device *dev)
3736 {
3737         struct rtl8169_private *tp = netdev_priv(dev);
3738         unsigned int entry = tp->cur_tx % NUM_TX_DESC;
3739         struct TxDesc *txd = tp->TxDescArray + entry;
3740         void __iomem *ioaddr = tp->mmio_addr;
3741         struct device *d = &tp->pci_dev->dev;
3742         dma_addr_t mapping;
3743         u32 status, len;
3744         u32 opts1;
3745         int frags;
3746
3747         if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) {
3748                 netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
3749                 goto err_stop_0;
3750         }
3751
3752         if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
3753                 goto err_stop_0;
3754
3755         len = skb_headlen(skb);
3756         mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE);
3757         if (unlikely(dma_mapping_error(d, mapping))) {
3758                 if (net_ratelimit())
3759                         netif_err(tp, drv, dev, "Failed to map TX DMA!\n");
3760                 goto err_dma_0;
3761         }
3762
3763         tp->tx_skb[entry].len = len;
3764         txd->addr = cpu_to_le64(mapping);
3765         txd->opts2 = cpu_to_le32(rtl8169_tx_vlan_tag(tp, skb));
3766
3767         opts1 = DescOwn | rtl8169_tso_csum(skb, dev);
3768
3769         frags = rtl8169_xmit_frags(tp, skb, opts1);
3770         if (frags < 0)
3771                 goto err_dma_1;
3772         else if (frags)
3773                 opts1 |= FirstFrag;
3774         else {
3775                 opts1 |= FirstFrag | LastFrag;
3776                 tp->tx_skb[entry].skb = skb;
3777         }
3778
3779         wmb();
3780
3781         /* anti gcc 2.95.3 bugware (sic) */
3782         status = opts1 | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
3783         txd->opts1 = cpu_to_le32(status);
3784
3785         tp->cur_tx += frags + 1;
3786
3787         wmb();
3788
3789         RTL_W8(TxPoll, NPQ);    /* set polling bit */
3790
3791         if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) {
3792                 netif_stop_queue(dev);
3793                 smp_rmb();
3794                 if (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)
3795                         netif_wake_queue(dev);
3796         }
3797
3798         return NETDEV_TX_OK;
3799
3800 err_dma_1:
3801         rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
3802 err_dma_0:
3803         dev_kfree_skb(skb);
3804         dev->stats.tx_dropped++;
3805         return NETDEV_TX_OK;
3806
3807 err_stop_0:
3808         netif_stop_queue(dev);
3809         dev->stats.tx_dropped++;
3810         return NETDEV_TX_BUSY;
3811 }
3812
3813 static void rtl8169_pcierr_interrupt(struct net_device *dev)
3814 {
3815         struct rtl8169_private *tp = netdev_priv(dev);
3816         struct pci_dev *pdev = tp->pci_dev;
3817         void __iomem *ioaddr = tp->mmio_addr;
3818         u16 pci_status, pci_cmd;
3819
3820         pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
3821         pci_read_config_word(pdev, PCI_STATUS, &pci_status);
3822
3823         netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status = 0x%04x)\n",
3824                   pci_cmd, pci_status);
3825
3826         /*
3827          * The recovery sequence below admits a very elaborate explanation:
3828          * - it seems to work;
3829          * - I did not see what else could be done;
3830          * - it makes iop3xx happy.
3831          *
3832          * Feel free to adjust to your needs.
3833          */
3834         if (pdev->broken_parity_status)
3835                 pci_cmd &= ~PCI_COMMAND_PARITY;
3836         else
3837                 pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY;
3838
3839         pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
3840
3841         pci_write_config_word(pdev, PCI_STATUS,
3842                 pci_status & (PCI_STATUS_DETECTED_PARITY |
3843                 PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT |
3844                 PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));
3845
3846         /* The infamous DAC f*ckup only happens at boot time */
3847         if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) {
3848                 netif_info(tp, intr, dev, "disabling PCI DAC\n");
3849                 tp->cp_cmd &= ~PCIDAC;
3850                 RTL_W16(CPlusCmd, tp->cp_cmd);
3851                 dev->features &= ~NETIF_F_HIGHDMA;
3852         }
3853
3854         rtl8169_hw_reset(ioaddr);
3855
3856         rtl8169_schedule_work(dev, rtl8169_reinit_task);
3857 }
3858
3859 static void rtl8169_tx_interrupt(struct net_device *dev,
3860                                  struct rtl8169_private *tp,
3861                                  void __iomem *ioaddr)
3862 {
3863         unsigned int dirty_tx, tx_left;
3864
3865         dirty_tx = tp->dirty_tx;
3866         smp_rmb();
3867         tx_left = tp->cur_tx - dirty_tx;
3868
3869         while (tx_left > 0) {
3870                 unsigned int entry = dirty_tx % NUM_TX_DESC;
3871                 struct ring_info *tx_skb = tp->tx_skb + entry;
3872                 u32 status;
3873
3874                 rmb();
3875                 status = le32_to_cpu(tp->TxDescArray[entry].opts1);
3876                 if (status & DescOwn)
3877                         break;
3878
3879                 rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
3880                                      tp->TxDescArray + entry);
3881                 if (status & LastFrag) {
3882                         dev->stats.tx_packets++;
3883                         dev->stats.tx_bytes += tx_skb->skb->len;
3884                         dev_kfree_skb(tx_skb->skb);
3885                         tx_skb->skb = NULL;
3886                 }
3887                 dirty_tx++;
3888                 tx_left--;
3889         }
3890
3891         if (tp->dirty_tx != dirty_tx) {
3892                 tp->dirty_tx = dirty_tx;
3893                 smp_wmb();
3894                 if (netif_queue_stopped(dev) &&
3895                     (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) {
3896                         netif_wake_queue(dev);
3897                 }
3898                 /*
3899                  * 8168 hack: TxPoll requests are lost when the Tx packets are
3900                  * too close. Let's kick an extra TxPoll request when a burst
3901                  * of start_xmit activity is detected (if it is not detected,
3902                  * it is slow enough). -- FR
3903                  */
3904                 smp_rmb();
3905                 if (tp->cur_tx != dirty_tx)
3906                         RTL_W8(TxPoll, NPQ);
3907         }
3908 }
3909
3910 static inline int rtl8169_fragmented_frame(u32 status)
3911 {
3912         return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
3913 }
3914
3915 static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
3916 {
3917         u32 status = opts1 & RxProtoMask;
3918
3919         if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
3920             ((status == RxProtoUDP) && !(opts1 & UDPFail)))
3921                 skb->ip_summed = CHECKSUM_UNNECESSARY;
3922         else
3923                 skb_checksum_none_assert(skb);
3924 }
3925
3926 static struct sk_buff *rtl8169_try_rx_copy(void *data,
3927                                            struct rtl8169_private *tp,
3928                                            int pkt_size,
3929                                            dma_addr_t addr)
3930 {
3931         struct sk_buff *skb;
3932         struct device *d = &tp->pci_dev->dev;
3933
3934         data = rtl8169_align(data);
3935         dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
3936         prefetch(data);
3937         skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
3938         if (skb)
3939                 memcpy(skb->data, data, pkt_size);
3940         dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);
3941
3942         return skb;
3943 }
3944
3945 /*
3946  * Warning: rtl8169_rx_interrupt() might be called:
3947  * 1) from NAPI (softirq) context
3948  *      (polling = 1: we should call napi_gro_receive())
3949  * 2) from process context (rtl8169_reset_task())
3950  *      (polling = 0: we must call netif_rx() instead)
3951  */
3952 static int rtl8169_rx_interrupt(struct net_device *dev,
3953                                 struct rtl8169_private *tp,
3954                                 void __iomem *ioaddr, u32 budget)
3955 {
3956         unsigned int cur_rx, rx_left;
3957         unsigned int count;
3958         int polling = (budget != ~(u32)0) ? 1 : 0;
3959
3960         cur_rx = tp->cur_rx;
3961         rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
3962         rx_left = min(rx_left, budget);
3963
3964         for (; rx_left > 0; rx_left--, cur_rx++) {
3965                 unsigned int entry = cur_rx % NUM_RX_DESC;
3966                 struct RxDesc *desc = tp->RxDescArray + entry;
3967                 u32 status;
3968
3969                 rmb();
3970                 status = le32_to_cpu(desc->opts1);
3971
3972                 if (status & DescOwn)
3973                         break;
3974                 if (unlikely(status & RxRES)) {
3975                         netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n",
3976                                    status);
3977                         dev->stats.rx_errors++;
3978                         if (status & (RxRWT | RxRUNT))
3979                                 dev->stats.rx_length_errors++;
3980                         if (status & RxCRC)
3981                                 dev->stats.rx_crc_errors++;
3982                         if (status & RxFOVF) {
3983                                 rtl8169_schedule_work(dev, rtl8169_reset_task);
3984                                 dev->stats.rx_fifo_errors++;
3985                         }
3986                         rtl8169_mark_to_asic(desc, rx_buf_sz);
3987                 } else {
3988                         struct sk_buff *skb;
3989                         dma_addr_t addr = le64_to_cpu(desc->addr);
3990                         int pkt_size = (status & 0x00001FFF) - 4;
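                             /* The length field appears to include the 4 byte
                              * FCS, hence the subtraction above (assumption
                              * based on the skb_put(pkt_size) further down).
                              */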
3991
3992                         /*
3993                          * The driver does not support incoming fragmented
3994                          * frames. They are seen as a symptom of over-mtu
3995                          * frames. They are seen as a symptom of over-MTU
3996                          * sized frames.
3997                         if (unlikely(rtl8169_fragmented_frame(status))) {
3998                                 dev->stats.rx_dropped++;
3999                                 dev->stats.rx_length_errors++;
4000                                 rtl8169_mark_to_asic(desc, rx_buf_sz);
4001                                 continue;
4002                         }
4003
4004                         skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry],
4005                                                   tp, pkt_size, addr);
4006                         rtl8169_mark_to_asic(desc, rx_buf_sz);
4007                         if (!skb) {
4008                                 dev->stats.rx_dropped++;
4009                                 continue;
4010                         }
4011
4012                         rtl8169_rx_csum(skb, status);
4013                         skb_put(skb, pkt_size);
4014                         skb->protocol = eth_type_trans(skb, dev);
4015
4016                         if (rtl8169_rx_vlan_skb(tp, desc, skb, polling) < 0) {
4017                                 if (likely(polling))
4018                                         napi_gro_receive(&tp->napi, skb);
4019                                 else
4020                                         netif_rx(skb);
4021                         }
4022
4023                         dev->stats.rx_bytes += pkt_size;
4024                         dev->stats.rx_packets++;
4025                 }
4026
4027                 /* Workaround for AMD platforms. */
4028                 if ((desc->opts2 & cpu_to_le32(0xfffe000)) &&
4029                     (tp->mac_version == RTL_GIGA_MAC_VER_05)) {
4030                         desc->opts2 = 0;
4031                         cur_rx++;
4032                 }
4033         }
4034
4035         count = cur_rx - tp->cur_rx;
4036         tp->cur_rx = cur_rx;
4037
4038         tp->dirty_rx += count;
4039
4040         return count;
4041 }
4042
4043 static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
4044 {
4045         struct net_device *dev = dev_instance;
4046         struct rtl8169_private *tp = netdev_priv(dev);
4047         void __iomem *ioaddr = tp->mmio_addr;
4048         int handled = 0;
4049         int status;
4050
4051         /* loop handling interrupts until we have no new ones or
4052          * we hit an invalid/hotplug case.
4053          */
4054         status = RTL_R16(IntrStatus);
4055         while (status && status != 0xffff) {
4056                 handled = 1;
4057
4058                 /* Handle all of the error cases first. These will reset
4059                  * the chip, so just exit the loop.
4060                  */
4061                 if (unlikely(!netif_running(dev))) {
4062                         rtl8169_asic_down(ioaddr);
4063                         break;
4064                 }
4065
4066                 /* Workaround for Rx FIFO overflow */
4067                 if (unlikely(status & RxFIFOOver) &&
4068                 (tp->mac_version == RTL_GIGA_MAC_VER_11)) {
4069                         netif_stop_queue(dev);
4070                         rtl8169_tx_timeout(dev);
4071                         break;
4072                 }
4073
4074                 if (unlikely(status & SYSErr)) {
4075                         rtl8169_pcierr_interrupt(dev);
4076                         break;
4077                 }
4078
4079                 if (status & LinkChg)
4080                         __rtl8169_check_link_status(dev, tp, ioaddr, true);
4081
4082                 /* We need to see the latest version of tp->intr_mask to
4083                  * avoid ignoring an MSI interrupt and having to wait for
4084                  * another event which may never come.
4085                  */
4086                 smp_rmb();
4087                 if (status & tp->intr_mask & tp->napi_event) {
4088                         RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event);
4089                         tp->intr_mask = ~tp->napi_event;
4090
4091                         if (likely(napi_schedule_prep(&tp->napi)))
4092                                 __napi_schedule(&tp->napi);
4093                         else
4094                                 netif_info(tp, intr, dev,
4095                                            "interrupt %04x in poll\n", status);
4096                 }
4097
4098                 /* We only get a new MSI interrupt when all active irq
4099                  * sources on the chip have been acknowledged. So, ack
4100                  * everything we've seen and check if new sources have become
4101                  * active to avoid blocking all interrupts from the chip.
4102                  */
4103                 RTL_W16(IntrStatus,
4104                         (status & RxFIFOOver) ? (status | RxOverflow) : status);
4105                 status = RTL_R16(IntrStatus);
4106         }
4107
4108         return IRQ_RETVAL(handled);
4109 }
4110
4111 static int rtl8169_poll(struct napi_struct *napi, int budget)
4112 {
4113         struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
4114         struct net_device *dev = tp->dev;
4115         void __iomem *ioaddr = tp->mmio_addr;
4116         int work_done;
4117
4118         work_done = rtl8169_rx_interrupt(dev, tp, ioaddr, (u32) budget);
4119         rtl8169_tx_interrupt(dev, tp, ioaddr);
4120
4121         if (work_done < budget) {
4122                 napi_complete(napi);
4123
4124                 /* We need to force the visibility of tp->intr_mask
4125                  * for other CPUs, as we can lose an MSI interrupt
4126                  * and potentially wait for a retransmit timeout if we don't.
4127                  * The posted write to IntrMask is safe, as it will
4128                  * eventually make it to the chip and we won't lose anything
4129                  * until it does.
4130                  */
4131                 tp->intr_mask = 0xffff;
4132                 wmb();
4133                 RTL_W16(IntrMask, tp->intr_event);
4134         }
4135
4136         return work_done;
4137 }
4138
4139 static void rtl8169_rx_missed(struct net_device *dev, void __iomem *ioaddr)
4140 {
4141         struct rtl8169_private *tp = netdev_priv(dev);
4142
4143         if (tp->mac_version > RTL_GIGA_MAC_VER_06)
4144                 return;
4145
4146         dev->stats.rx_missed_errors += (RTL_R32(RxMissed) & 0xffffff);
4147         RTL_W32(RxMissed, 0);
4148 }
4149
4150 static void rtl8169_down(struct net_device *dev)
4151 {
4152         struct rtl8169_private *tp = netdev_priv(dev);
4153         void __iomem *ioaddr = tp->mmio_addr;
4154
4155         rtl8169_delete_timer(dev);
4156
4157         netif_stop_queue(dev);
4158
4159         napi_disable(&tp->napi);
4160
4161         spin_lock_irq(&tp->lock);
4162
4163         rtl8169_asic_down(ioaddr);
4164         /*
4165          * At this point device interrupts can not be enabled in any function,
4166          * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task,
4167          * rtl8169_reinit_task) and napi is disabled (rtl8169_poll).
4168          */
4169         rtl8169_rx_missed(dev, ioaddr);
4170
4171         spin_unlock_irq(&tp->lock);
4172
4173         synchronize_irq(dev->irq);
4174
4175         /* Give a racing hard_start_xmit a few cycles to complete. */
4176         synchronize_sched();  /* FIXME: should this be synchronize_irq()? */
4177
4178         rtl8169_tx_clear(tp);
4179
4180         rtl8169_rx_clear(tp);
4181 }
4182
4183 static int rtl8169_close(struct net_device *dev)
4184 {
4185         struct rtl8169_private *tp = netdev_priv(dev);
4186         struct pci_dev *pdev = tp->pci_dev;
4187
4188         pm_runtime_get_sync(&pdev->dev);
4189
4190         /* update counters before going down */
4191         rtl8169_update_counters(dev);
4192
4193         rtl8169_down(dev);
4194
4195         free_irq(dev->irq, dev);
4196
4197         dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
4198                           tp->RxPhyAddr);
4199         dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
4200                           tp->TxPhyAddr);
4201         tp->TxDescArray = NULL;
4202         tp->RxDescArray = NULL;
4203
4204         pm_runtime_put_sync(&pdev->dev);
4205
4206         return 0;
4207 }
4208
4209 static void rtl_set_rx_mode(struct net_device *dev)
4210 {
4211         struct rtl8169_private *tp = netdev_priv(dev);
4212         void __iomem *ioaddr = tp->mmio_addr;
4213         unsigned long flags;
4214         u32 mc_filter[2];       /* Multicast hash filter */
4215         int rx_mode;
4216         u32 tmp = 0;
4217
4218         if (dev->flags & IFF_PROMISC) {
4219                 /* Unconditionally log net taps. */
4220                 netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
4221                 rx_mode =
4222                     AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
4223                     AcceptAllPhys;
4224                 mc_filter[1] = mc_filter[0] = 0xffffffff;
4225         } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
4226                    (dev->flags & IFF_ALLMULTI)) {
4227                 /* Too many to filter perfectly -- accept all multicasts. */
4228                 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
4229                 mc_filter[1] = mc_filter[0] = 0xffffffff;
4230         } else {
4231                 struct netdev_hw_addr *ha;
4232
4233                 rx_mode = AcceptBroadcast | AcceptMyPhys;
4234                 mc_filter[1] = mc_filter[0] = 0;
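                     /*
                      * Hash filter: the top 6 bits of each address's CRC pick
                      * one of the 64 filter bits, split across the two 32-bit
                      * MAR registers written below.
                      */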
4235                 netdev_for_each_mc_addr(ha, dev) {
4236                         int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
4237                         mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
4238                         rx_mode |= AcceptMulticast;
4239                 }
4240         }
4241
4242         spin_lock_irqsave(&tp->lock, flags);
4243
4244         tmp = rtl8169_rx_config | rx_mode |
4245               (RTL_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);
4246
4247         if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
4248                 u32 data = mc_filter[0];
4249
4250                 mc_filter[0] = swab32(mc_filter[1]);
4251                 mc_filter[1] = swab32(data);
4252         }
4253
4254         RTL_W32(MAR0 + 4, mc_filter[1]);
4255         RTL_W32(MAR0 + 0, mc_filter[0]);
4256
4257         RTL_W32(RxConfig, tmp);
4258
4259         spin_unlock_irqrestore(&tp->lock, flags);
4260 }
4261
4262 /**
4263  *  rtl8169_get_stats - Get rtl8169 read/write statistics
4264  *  @dev: The Ethernet Device to get statistics for
4265  *
4266  *  Get TX/RX statistics for rtl8169
4267  */
4268 static struct net_device_stats *rtl8169_get_stats(struct net_device *dev)
4269 {
4270         struct rtl8169_private *tp = netdev_priv(dev);
4271         void __iomem *ioaddr = tp->mmio_addr;
4272         unsigned long flags;
4273
4274         if (netif_running(dev)) {
4275                 spin_lock_irqsave(&tp->lock, flags);
4276                 rtl8169_rx_missed(dev, ioaddr);
4277                 spin_unlock_irqrestore(&tp->lock, flags);
4278         }
4279
4280         return &dev->stats;
4281 }
4282
4283 static void rtl8169_net_suspend(struct net_device *dev)
4284 {
4285         if (!netif_running(dev))
4286                 return;
4287
4288         netif_device_detach(dev);
4289         netif_stop_queue(dev);
4290 }
4291
4292 #ifdef CONFIG_PM
4293
4294 static int rtl8169_suspend(struct device *device)
4295 {
4296         struct pci_dev *pdev = to_pci_dev(device);
4297         struct net_device *dev = pci_get_drvdata(pdev);
4298
4299         rtl8169_net_suspend(dev);
4300
4301         return 0;
4302 }
4303
4304 static void __rtl8169_resume(struct net_device *dev)
4305 {
4306         netif_device_attach(dev);
4307         rtl8169_schedule_work(dev, rtl8169_reset_task);
4308 }
4309
4310 static int rtl8169_resume(struct device *device)
4311 {
4312         struct pci_dev *pdev = to_pci_dev(device);
4313         struct net_device *dev = pci_get_drvdata(pdev);
4314         struct rtl8169_private *tp = netdev_priv(dev);
4315
4316         rtl8169_init_phy(dev, tp);
4317
4318         if (netif_running(dev))
4319                 __rtl8169_resume(dev);
4320
4321         return 0;
4322 }
4323
4324 static int rtl8169_runtime_suspend(struct device *device)
4325 {
4326         struct pci_dev *pdev = to_pci_dev(device);
4327         struct net_device *dev = pci_get_drvdata(pdev);
4328         struct rtl8169_private *tp = netdev_priv(dev);
4329
4330         if (!tp->TxDescArray)
4331                 return 0;
4332
4333         spin_lock_irq(&tp->lock);
4334         tp->saved_wolopts = __rtl8169_get_wol(tp);
4335         __rtl8169_set_wol(tp, WAKE_ANY);
4336         spin_unlock_irq(&tp->lock);
4337
4338         rtl8169_net_suspend(dev);
4339
4340         return 0;
4341 }
4342
4343 static int rtl8169_runtime_resume(struct device *device)
4344 {
4345         struct pci_dev *pdev = to_pci_dev(device);
4346         struct net_device *dev = pci_get_drvdata(pdev);
4347         struct rtl8169_private *tp = netdev_priv(dev);
4348
4349         if (!tp->TxDescArray)
4350                 return 0;
4351
4352         spin_lock_irq(&tp->lock);
4353         __rtl8169_set_wol(tp, tp->saved_wolopts);
4354         tp->saved_wolopts = 0;
4355         spin_unlock_irq(&tp->lock);
4356
4357         rtl8169_init_phy(dev, tp);
4358
4359         __rtl8169_resume(dev);
4360
4361         return 0;
4362 }
4363
4364 static int rtl8169_runtime_idle(struct device *device)
4365 {
4366         struct pci_dev *pdev = to_pci_dev(device);
4367         struct net_device *dev = pci_get_drvdata(pdev);
4368         struct rtl8169_private *tp = netdev_priv(dev);
4369
4370         return tp->TxDescArray ? -EBUSY : 0;
4371 }
4372
4373 static const struct dev_pm_ops rtl8169_pm_ops = {
4374         .suspend = rtl8169_suspend,
4375         .resume = rtl8169_resume,
4376         .freeze = rtl8169_suspend,
4377         .thaw = rtl8169_resume,
4378         .poweroff = rtl8169_suspend,
4379         .restore = rtl8169_resume,
4380         .runtime_suspend = rtl8169_runtime_suspend,
4381         .runtime_resume = rtl8169_runtime_resume,
4382         .runtime_idle = rtl8169_runtime_idle,
4383 };
4384
4385 #define RTL8169_PM_OPS  (&rtl8169_pm_ops)
4386
4387 #else /* !CONFIG_PM */
4388
4389 #define RTL8169_PM_OPS  NULL
4390
4391 #endif /* !CONFIG_PM */
4392
4393 static void rtl_shutdown(struct pci_dev *pdev)
4394 {
4395         struct net_device *dev = pci_get_drvdata(pdev);
4396         struct rtl8169_private *tp = netdev_priv(dev);
4397         void __iomem *ioaddr = tp->mmio_addr;
4398
4399         rtl8169_net_suspend(dev);
4400
4401         /* restore original MAC address */
4402         rtl_rar_set(tp, dev->perm_addr);
4403
4404         spin_lock_irq(&tp->lock);
4405
4406         rtl8169_asic_down(ioaddr);
4407
4408         spin_unlock_irq(&tp->lock);
4409
4410         if (system_state == SYSTEM_POWER_OFF) {
4411                 /* WoL fails with some 8168 chips when the receiver is disabled. */
4412                 if (tp->features & RTL_FEATURE_WOL) {
4413                         pci_clear_master(pdev);
4414
4415                         RTL_W8(ChipCmd, CmdRxEnb);
4416                         /* PCI commit */
4417                         RTL_R8(ChipCmd);
4418                 }
4419
4420                 pci_wake_from_d3(pdev, true);
4421                 pci_set_power_state(pdev, PCI_D3hot);
4422         }
4423 }
4424
4425 static struct pci_driver rtl8169_pci_driver = {
4426         .name           = MODULENAME,
4427         .id_table       = rtl8169_pci_tbl,
4428         .probe          = rtl8169_init_one,
4429         .remove         = __devexit_p(rtl8169_remove_one),
4430         .shutdown       = rtl_shutdown,
4431         .driver.pm      = RTL8169_PM_OPS,
4432 };
4433
4434 static int __init rtl8169_init_module(void)
4435 {
4436         return pci_register_driver(&rtl8169_pci_driver);
4437 }
4438
4439 static void __exit rtl8169_cleanup_module(void)
4440 {
4441         pci_unregister_driver(&rtl8169_pci_driver);
4442 }
4443
4444 module_init(rtl8169_init_module);
4445 module_exit(rtl8169_cleanup_module);