/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/config.h>

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>

#include <net/checksum.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#ifdef CONFIG_SPARC64
#include <asm/idprom.h>
#include <asm/oplib.h>
#endif
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#ifdef NETIF_F_TSO
#define TG3_TSO_SUPPORT	1
#else
#define TG3_TSO_SUPPORT	0
#endif

#include "tg3.h"
#define DRV_MODULE_NAME		"tg3"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"3.34"
#define DRV_MODULE_RELDATE	"July 25, 2005"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)
/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE		512
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JUMBO_RING_SIZE		256
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)
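
/* Illustrative sketch (not part of the driver): with a power-of-two ring
 * size known at compile time, the two forms below are equivalent, but the
 * second compiles to a simple AND with no hardware divide; the NEXT_TX()
 * macro further down uses the mask form directly.
 *
 *	next = (idx + 1) % TG3_RX_RING_SIZE;
 *	next = (idx + 1) & (TG3_RX_RING_SIZE - 1);
 */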
#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
				   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define TX_RING_GAP(TP)	\
	(TG3_TX_RING_SIZE - (TP)->tx_pending)
#define TX_BUFFS_AVAIL(TP)						\
	(((TP)->tx_cons <= (TP)->tx_prod) ?				\
	  (TP)->tx_cons + (TP)->tx_pending - (TP)->tx_prod :		\
	  (TP)->tx_cons - (TP)->tx_prod - TX_RING_GAP(TP))
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
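
/* Illustrative sketch (not part of the driver): a transmit path would
 * typically gate queueing on the free-descriptor count computed by
 * TX_BUFFS_AVAIL(), stopping the queue while a maximally fragmented
 * skb might not fit:
 *
 *	if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
 *		netif_stop_queue(tp->dev);
 */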
#define RX_PKT_BUF_SZ		(1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ	(9046 + tp->rx_offset + 64)
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH		(TG3_TX_RING_SIZE / 4)

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS		(sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST		6
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
static struct pci_device_id tg3_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};
static struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
		spin_lock_bh(&tp->indirect_lock);
		pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
		spin_unlock_bh(&tp->indirect_lock);
	} else {
		writel(val, tp->regs + off);
		if ((tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) != 0)
			readl(tp->regs + off);
	}
}
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
		spin_lock_bh(&tp->indirect_lock);
		pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
		spin_unlock_bh(&tp->indirect_lock);
	} else {
		void __iomem *dest = tp->regs + off;
		writel(val, dest);
		readl(dest);	/* always flush PCI write */
	}
}
static inline void _tw32_rx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}

static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}
#define tw32_mailbox(reg, val)	writel(((val) & 0xffffffff), tp->regs + (reg))
#define tw32_rx_mbox(reg, val)	_tw32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)	_tw32_tx_mbox(tp, reg, val)

#define tw32(reg,val)		tg3_write_indirect_reg32(tp,(reg),(val))
#define tw32_f(reg,val)		_tw32_flush(tp,(reg),(val))
#define tw16(reg,val)		writew(((val) & 0xffff), tp->regs + (reg))
#define tw8(reg,val)		writeb(((val) & 0xff), tp->regs + (reg))
#define tr32(reg)		readl(tp->regs + (reg))
#define tr16(reg)		readw(tp->regs + (reg))
#define tr8(reg)		readb(tp->regs + (reg))
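
/* Illustrative sketch (not part of the driver, compiled out): the
 * accessors above assume a local variable named 'tp' is in scope and
 * give a compact read-modify-write idiom over BAR-mapped registers.
 * tw32_f() additionally reads the register back so the write is posted
 * to the chip before the caller proceeds.
 */
#if 0
static void tg3_example_set_int(struct tg3 *tp)
{
	u32 ctrl = tr32(GRC_LOCAL_CTRL);

	tw32_f(GRC_LOCAL_CTRL, ctrl | GRC_LCLCTRL_SETINT);
}
#endif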
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	spin_lock_bh(&tp->indirect_lock);
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

	/* Always leave this as zero. */
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	spin_unlock_bh(&tp->indirect_lock);
}
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	spin_lock_bh(&tp->indirect_lock);
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

	/* Always leave this as zero. */
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	spin_unlock_bh(&tp->indirect_lock);
}
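
/* Illustrative sketch (not part of the driver, compiled out): the
 * memory-window pair above is how NIC-local SRAM is reached through
 * PCI config space without mapping it, e.g. polling the firmware
 * mailbox word used elsewhere in this file.
 */
#if 0
static u32 tg3_example_read_fw_mbox(struct tg3 *tp)
{
	u32 val;

	tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
	return val;
}
#endif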
static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
}
static inline void tg3_cond_int(struct tg3 *tp)
{
	if (tp->hw_status->status & SD_STATUS_UPDATED)
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
}
static void tg3_enable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		     (tp->last_tag << 24));
	tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);

	tg3_cond_int(tp);
}
static inline unsigned int tg3_has_work(struct tg3 *tp)
{
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tp->tx_cons ||
	    sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}
/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		     tp->last_tag << 24);

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tp))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	netif_poll_disable(tp->dev);
	netif_tx_disable(tp->dev);
}
static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	netif_poll_enable(tp->dev);
	tp->hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
		return;

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_f(TG3PCI_CLOCK_CTRL,
			       clock_ctrl | CLOCK_CTRL_625_CORE);
			udelay(40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_f(TG3PCI_CLOCK_CTRL,
		       clock_ctrl |
		       (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
		udelay(40);
		tw32_f(TG3PCI_CLOCK_CTRL,
		       clock_ctrl | (CLOCK_CTRL_ALTCLK));
		udelay(40);
	}
	tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
	udelay(40);
}
#define PHY_BUSY_LOOPS	5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
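
/* Illustrative sketch (not part of the driver, compiled out):
 * read-modify-write of a PHY register through the MII helpers above,
 * here restarting autonegotiation.
 */
#if 0
static void tg3_example_restart_aneg(struct tg3 *tp)
{
	u32 bmcr;

	if (!tg3_readphy(tp, MII_BMCR, &bmcr))
		tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART);
}
#endif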
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
		return;

	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     (val | (1 << 15) | (1 << 4)));
}
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit <= 0)
		return -EBUSY;

	return 0;
}
static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, 0x16, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit <= 0)
		return -EBUSY;

	return 0;
}
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB.  */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access.  */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	} else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 phy_status;
	int err;

	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

out:
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that
	 * support jumbo frames
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	tg3_phy_set_wirespeed(tp);
	return 0;
}
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		tp_peer = pci_get_drvdata(tp->pdev_peer);
		if (!tp_peer)
			BUG();
	}

	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			       (GRC_LCLCTRL_GPIO_OE0 |
				GRC_LCLCTRL_GPIO_OE1 |
				GRC_LCLCTRL_GPIO_OE2 |
				GRC_LCLCTRL_GPIO_OUTPUT0 |
				GRC_LCLCTRL_GPIO_OUTPUT1));
			udelay(100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl;

			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				   NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
					 GRC_LCLCTRL_GPIO_OE1 |
					 GRC_LCLCTRL_GPIO_OE2 |
					 GRC_LCLCTRL_GPIO_OUTPUT1 |
					 GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			       grc_local_ctrl);
			udelay(100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			       grc_local_ctrl);
			udelay(100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				       grc_local_ctrl);
				udelay(100);
			}
		}
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			       (GRC_LCLCTRL_GPIO_OE1 |
				GRC_LCLCTRL_GPIO_OUTPUT1));
			udelay(100);

			tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			       (GRC_LCLCTRL_GPIO_OE1));
			udelay(100);

			tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			       (GRC_LCLCTRL_GPIO_OE1 |
				GRC_LCLCTRL_GPIO_OUTPUT1));
			udelay(100);
		}
	}
}
static int tg3_setup_phy(struct tg3 *, int);

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

static void tg3_write_sig_post_reset(struct tg3 *, int);
static int tg3_halt_cpu(struct tg3 *, u32);
static int tg3_set_power_state(struct tg3 *tp, int state)
{
	u32 misc_host_ctrl;
	u16 power_control, power_caps;
	int pm = tp->pm_cap;

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	pci_read_config_word(tp->pdev,
			     pm + PCI_PM_CTRL,
			     &power_control);
	power_control |= PCI_PM_CTRL_PME_STATUS;
	power_control &= ~(PCI_PM_CTRL_STATE_MASK);
	switch (state) {
	case 0:
		power_control |= 0;
		pci_write_config_word(tp->pdev,
				      pm + PCI_PM_CTRL,
				      power_control);
		udelay(100);	/* Delay after power state change */

		/* Switch out of Vaux if it is not a LOM */
		if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)) {
			tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
			udelay(100);
		}

		return 0;

	case 1:
		power_control |= 1;
		break;

	case 2:
		power_control |= 2;
		break;

	case 3:
		power_control |= 3;
		break;

	default:
		printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
		       "requested.\n",
		       tp->dev->name, state);
		return -EINVAL;
	};

	power_control |= PCI_PM_CTRL_PME_ENABLE;

	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	if (tp->link_config.phy_is_low_power == 0) {
		tp->link_config.phy_is_low_power = 1;
		tp->link_config.orig_speed = tp->link_config.speed;
		tp->link_config.orig_duplex = tp->link_config.duplex;
		tp->link_config.orig_autoneg = tp->link_config.autoneg;
	}

	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
		tp->link_config.speed = SPEED_10;
		tp->link_config.duplex = DUPLEX_HALF;
		tp->link_config.autoneg = AUTONEG_ENABLE;
		tg3_setup_phy(tp, 0);
	}

	pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);

	if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
		u32 mac_mode;

		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
			tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
			udelay(40);

			mac_mode = MAC_MODE_PORT_MODE_MII;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
			    !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
				mac_mode |= MAC_MODE_LINK_POLARITY;
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
		     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
			mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_f(TG3PCI_CLOCK_CTRL, base_val |
		       CLOCK_CTRL_ALTCLK |
		       CLOCK_CTRL_PWRDOWN_PLL133);
		udelay(40);
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
		/* do nothing */
	} else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
		udelay(40);

		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
		udelay(40);

		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_f(TG3PCI_CLOCK_CTRL,
			       tp->pci_clock_ctrl | newbits3);
			udelay(40);
		}
	}

	tg3_frob_aux_power(tp);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
			tg3_halt_cpu(tp, RX_CPU_BASE);
	}

	/* Finally, set the new power state. */
	pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
	udelay(100);	/* Delay after power state change */

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	return 0;
}
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
	} else {
		printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
		       tp->dev->name,
		       (tp->link_config.active_speed == SPEED_1000 ?
			1000 :
			(tp->link_config.active_speed == SPEED_100 ?
			 100 : 10)),
		       (tp->link_config.active_duplex == DUPLEX_FULL ?
			"full" : "half"));

		printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
		       "%s for RX.\n",
		       tp->dev->name,
		       (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
		       (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
	}
}
static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
{
	u32 new_tg3_flags = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {

		/* Convert 1000BaseX flow control bits to 1000BaseT
		 * bits before resolving flow control.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			local_adv &= ~(ADVERTISE_PAUSE_CAP |
				       ADVERTISE_PAUSE_ASYM);
			remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

			if (local_adv & ADVERTISE_1000XPAUSE)
				local_adv |= ADVERTISE_PAUSE_CAP;
			if (local_adv & ADVERTISE_1000XPSE_ASYM)
				local_adv |= ADVERTISE_PAUSE_ASYM;
			if (remote_adv & LPA_1000XPAUSE)
				remote_adv |= LPA_PAUSE_CAP;
			if (remote_adv & LPA_1000XPAUSE_ASYM)
				remote_adv |= LPA_PAUSE_ASYM;
		}

		if (local_adv & ADVERTISE_PAUSE_CAP) {
			if (local_adv & ADVERTISE_PAUSE_ASYM) {
				if (remote_adv & LPA_PAUSE_CAP)
					new_tg3_flags |=
						(TG3_FLAG_RX_PAUSE |
						 TG3_FLAG_TX_PAUSE);
				else if (remote_adv & LPA_PAUSE_ASYM)
					new_tg3_flags |=
						(TG3_FLAG_RX_PAUSE);
			} else {
				if (remote_adv & LPA_PAUSE_CAP)
					new_tg3_flags |=
						(TG3_FLAG_RX_PAUSE |
						 TG3_FLAG_TX_PAUSE);
			}
		} else if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if ((remote_adv & LPA_PAUSE_CAP) &&
			    (remote_adv & LPA_PAUSE_ASYM))
				new_tg3_flags |= TG3_FLAG_TX_PAUSE;
		}

		tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
		tp->tg3_flags |= new_tg3_flags;
	} else {
		new_tg3_flags = tp->tg3_flags;
	}

	if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode) {
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode) {
		tw32_f(MAC_TX_MODE, tp->tx_mode);
	}
}
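
/* Illustrative summary (not from the driver) of the 802.3x pause
 * resolution implemented above when both ends autonegotiate:
 *
 *	local PAUSE  local ASYM  remote PAUSE  remote ASYM  =>  result
 *	     1           any          1            any          RX+TX pause
 *	     1            1           0             1           RX pause only
 *	     0            1           1             1           TX pause only
 *	     otherwise                                          no pause
 */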
static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
{
	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
	case MII_TG3_AUX_STAT_10HALF:
		*speed = SPEED_10;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_10FULL:
		*speed = SPEED_10;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_100HALF:
		*speed = SPEED_100;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_100FULL:
		*speed = SPEED_100;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_1000HALF:
		*speed = SPEED_1000;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_1000FULL:
		*speed = SPEED_1000;
		*duplex = DUPLEX_FULL;
		break;

	default:
		*speed = SPEED_INVALID;
		*duplex = DUPLEX_INVALID;
		break;
	};
}
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->link_config.phy_is_low_power) {
		/* Entering low power mode.  Disable gigabit and
		 * 100baseT advertisements.
		 */
		tg3_writephy(tp, MII_TG3_CTRL, 0);

		new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
			new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		tp->link_config.advertising =
			(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg | ADVERTISED_MII);

		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
			new_adv |= ADVERTISE_10HALF;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
			new_adv |= ADVERTISE_10FULL;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
			new_adv |= ADVERTISE_100HALF;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
			new_adv |= ADVERTISE_100FULL;
		tg3_writephy(tp, MII_ADVERTISE, new_adv);

		if (tp->link_config.advertising &
		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
			new_adv = 0;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);
		}
	} else {
		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = MII_TG3_CTRL_ADV_1000_FULL;
			else
				new_adv = MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);

			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
			if (tp->link_config.speed == SPEED_100) {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_100FULL;
				else
					new_adv |= ADVERTISE_100HALF;
			} else {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_10FULL;
				else
					new_adv |= ADVERTISE_10HALF;
			}
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
		}
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		};

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
	int err;

	/* Turn off tap power management. */
	/* Set Extended packet length bit */
	err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);

	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);

	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);

	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);

	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);

	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);

	udelay(40);

	return err;
}
static int tg3_copper_is_advertising_all(struct tg3 *tp)
{
	u32 adv_reg, all_mask;

	if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
		return 0;

	all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
		    ADVERTISE_100HALF | ADVERTISE_100FULL);
	if ((adv_reg & all_mask) != all_mask)
		return 0;
	if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
		u32 tg3_ctrl;

		if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
			return 0;

		all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
			    MII_TG3_CTRL_ADV_1000_FULL);
		if ((tg3_ctrl & all_mask) != all_mask)
			return 0;
	}
	return 1;
}
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, dummy;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	       (MAC_STATUS_SYNC_CHANGED |
		MAC_STATUS_CFG_CHANGED |
		MAC_STATUS_MI_COMPLETION |
		MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	tp->mi_mode = MAC_MI_MODE_BASE;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, 0x1c, 0x8c68);
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
		u32 val;

		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
		if (!(val & (1 << 10))) {
			val |= (1 << 10);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
			goto relink;
		}
	}

	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			if (bmcr & BMCR_ANENABLE) {
				current_link_up = 1;

				/* Force autoneg restart if we are exiting
				 * low power mode.
				 */
				if (!tg3_copper_is_advertising_all(tp))
					current_link_up = 0;
			} else {
				current_link_up = 0;
			}
		} else {
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex) {
				current_link_up = 1;
			} else {
				current_link_up = 0;
			}
		}

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;
	}

	if (current_link_up == 1 &&
	    (tp->link_config.active_duplex == DUPLEX_FULL) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 local_adv, remote_adv;

		if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
			local_adv = 0;
		local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

		if (tg3_readphy(tp, MII_LPA, &remote_adv))
			remote_adv = 0;

		remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

		/* If we are not advertising full pause capability,
		 * something is wrong.  Bring the link down and reconfigure.
		 */
		if (local_adv != ADVERTISE_PAUSE_CAP) {
			current_link_up = 0;
		} else {
			tg3_setup_flow_control(tp, local_adv, remote_adv);
		}
	}
relink:
	if (current_link_up == 0) {
		u32 tmp;

		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &tmp);
		if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
		    (tmp & BMSR_LSTATUS))
			current_link_up = 1;
	}

	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
		    (current_link_up == 1 &&
		     tp->link_config.active_speed == SPEED_10))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	} else {
		if (current_link_up == 1)
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		       (MAC_STATUS_SYNC_CHANGED |
			MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
struct tg3_fiber_aneginfo {
	int state;
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	unsigned long link_time, cur_time;

	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06
};

#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

#define ANEG_STATE_SETTLE_TIME	10000
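
/* Illustrative note (not from the driver): the state machine below is
 * polled rather than event driven.  A caller loops roughly as
 *
 *	while (...) {
 *		status = tg3_fiber_aneg_smachine(tp, &aninfo);
 *		if (status == ANEG_DONE || status == ANEG_FAILED)
 *			break;
 *		udelay(1);
 *	}
 *
 * which is what fiber_autoneg() further down does, so
 * ANEG_STATE_SETTLE_TIME is measured in those ~1us ticks.
 */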
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch(ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		} else {
			ret = ANEG_TIMER_ENAB;
		}
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0) {
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		}
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	};

	return ret;
}
static int fiber_autoneg(struct tg3 *tp, u32 *flags)
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*flags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
2232 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2234 u32 sg_dig_ctrl, sg_dig_status;
2235 u32 serdes_cfg, expected_sg_dig_ctrl;
2236 int workaround, port_a;
2237 int current_link_up;
2240 expected_sg_dig_ctrl = 0;
2243 current_link_up = 0;
2245 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2246 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2248 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2251 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2252 /* preserve bits 20-23 for voltage regulator */
2253 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2256 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2258 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2259 if (sg_dig_ctrl & (1 << 31)) {
2261 u32 val = serdes_cfg;
2267 tw32_f(MAC_SERDES_CFG, val);
2269 tw32_f(SG_DIG_CTRL, 0x01388400);
2271 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2272 tg3_setup_flow_control(tp, 0, 0);
2273 current_link_up = 1;
2278 /* Want auto-negotiation. */
2279 expected_sg_dig_ctrl = 0x81388400;
2281 /* Pause capability */
2282 expected_sg_dig_ctrl |= (1 << 11);
2284 /* Asymettric pause */
2285 expected_sg_dig_ctrl |= (1 << 12);
2287 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2289 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2290 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2292 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2294 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2295 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2296 MAC_STATUS_SIGNAL_DET)) {
2299 /* Giver time to negotiate (~200ms) */
2300 for (i = 0; i < 40000; i++) {
2301 sg_dig_status = tr32(SG_DIG_STATUS);
2302 if (sg_dig_status & (0x3))
2306 mac_status = tr32(MAC_STATUS);
2308 if ((sg_dig_status & (1 << 1)) &&
2309 (mac_status & MAC_STATUS_PCS_SYNCED)) {
2310 u32 local_adv, remote_adv;
2312 local_adv = ADVERTISE_PAUSE_CAP;
2314 if (sg_dig_status & (1 << 19))
2315 remote_adv |= LPA_PAUSE_CAP;
2316 if (sg_dig_status & (1 << 20))
2317 remote_adv |= LPA_PAUSE_ASYM;
2319 tg3_setup_flow_control(tp, local_adv, remote_adv);
2320 current_link_up = 1;
2321 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2322 } else if (!(sg_dig_status & (1 << 1))) {
2323 if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2324 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2327 u32 val = serdes_cfg;
2334 tw32_f(MAC_SERDES_CFG, val);
2337 tw32_f(SG_DIG_CTRL, 0x01388400);
2340 /* Link parallel detection - link is up */
2341 /* only if we have PCS_SYNC and not */
2342 /* receiving config code words */
2343 mac_status = tr32(MAC_STATUS);
2344 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2345 !(mac_status & MAC_STATUS_RCVD_CFG)) {
2346 tg3_setup_flow_control(tp, 0, 0);
2347 current_link_up = 1;
2354 return current_link_up;
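/* A minimal sketch of how the PAUSE bits assembled above typically resolve
 * under the standard 802.3 pause rules. This hypothetical helper is
 * illustrative only and is not the driver's own tg3_setup_flow_control();
 * rx = we honor PAUSE frames from the partner, tx = we may send them.
 */
static void tg3_resolve_pause_example(u32 local_adv, u32 remote_adv,
				      int *rx, int *tx)
{
	*rx = *tx = 0;
	if ((local_adv & ADVERTISE_PAUSE_CAP) &&
	    (remote_adv & LPA_PAUSE_CAP)) {
		/* Both ends symmetric-pause capable. */
		*rx = *tx = 1;
	} else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((local_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & LPA_PAUSE_ASYM))
			*rx = 1;	/* partner may pause us only */
		else if (!(local_adv & ADVERTISE_PAUSE_CAP) &&
			 (remote_adv & LPA_PAUSE_CAP) &&
			 (remote_adv & LPA_PAUSE_ASYM))
			*tx = 1;	/* we may pause the partner only */
	}
}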
2357 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2359 int current_link_up = 0;
2361 if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2362 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2366 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2370 if (fiber_autoneg(tp, &flags)) {
2371 u32 local_adv, remote_adv;
2373 local_adv = ADVERTISE_PAUSE_CAP;
2374 remote_adv = 0;
2375 if (flags & MR_LP_ADV_SYM_PAUSE)
2376 remote_adv |= LPA_PAUSE_CAP;
2377 if (flags & MR_LP_ADV_ASYM_PAUSE)
2378 remote_adv |= LPA_PAUSE_ASYM;
2380 tg3_setup_flow_control(tp, local_adv, remote_adv);
2382 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2383 current_link_up = 1;
2385 for (i = 0; i < 30; i++) {
2388 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2389 MAC_STATUS_CFG_CHANGED));
2391 if ((tr32(MAC_STATUS) &
2392 (MAC_STATUS_SYNC_CHANGED |
2393 MAC_STATUS_CFG_CHANGED)) == 0)
2397 mac_status = tr32(MAC_STATUS);
2398 if (current_link_up == 0 &&
2399 (mac_status & MAC_STATUS_PCS_SYNCED) &&
2400 !(mac_status & MAC_STATUS_RCVD_CFG))
2401 current_link_up = 1;
2403 /* Forcing 1000FD link up. */
2404 current_link_up = 1;
2405 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2407 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2412 return current_link_up;
2415 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2418 u16 orig_active_speed;
2419 u8 orig_active_duplex;
2421 int current_link_up;
2425 orig_pause_cfg = (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2426 TG3_FLAG_TX_PAUSE));
2427 orig_active_speed = tp->link_config.active_speed;
2428 orig_active_duplex = tp->link_config.active_duplex;
2430 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2431 netif_carrier_ok(tp->dev) &&
2432 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2433 mac_status = tr32(MAC_STATUS);
2434 mac_status &= (MAC_STATUS_PCS_SYNCED |
2435 MAC_STATUS_SIGNAL_DET |
2436 MAC_STATUS_CFG_CHANGED |
2437 MAC_STATUS_RCVD_CFG);
2438 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2439 MAC_STATUS_SIGNAL_DET)) {
2440 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2441 MAC_STATUS_CFG_CHANGED));
2446 tw32_f(MAC_TX_AUTO_NEG, 0);
2448 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2449 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2450 tw32_f(MAC_MODE, tp->mac_mode);
2453 if (tp->phy_id == PHY_ID_BCM8002)
2454 tg3_init_bcm8002(tp);
2456 /* Enable link change event even when serdes polling. */
2457 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2460 current_link_up = 0;
2461 mac_status = tr32(MAC_STATUS);
2463 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2464 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2466 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2468 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2469 tw32_f(MAC_MODE, tp->mac_mode);
2472 tp->hw_status->status =
2473 (SD_STATUS_UPDATED |
2474 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2476 for (i = 0; i < 100; i++) {
2477 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2478 MAC_STATUS_CFG_CHANGED));
2480 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2481 MAC_STATUS_CFG_CHANGED)) == 0)
2485 mac_status = tr32(MAC_STATUS);
2486 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2487 current_link_up = 0;
2488 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2489 tw32_f(MAC_MODE, (tp->mac_mode |
2490 MAC_MODE_SEND_CONFIGS));
2492 tw32_f(MAC_MODE, tp->mac_mode);
2496 if (current_link_up == 1) {
2497 tp->link_config.active_speed = SPEED_1000;
2498 tp->link_config.active_duplex = DUPLEX_FULL;
2499 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2500 LED_CTRL_LNKLED_OVERRIDE |
2501 LED_CTRL_1000MBPS_ON));
2503 tp->link_config.active_speed = SPEED_INVALID;
2504 tp->link_config.active_duplex = DUPLEX_INVALID;
2505 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2506 LED_CTRL_LNKLED_OVERRIDE |
2507 LED_CTRL_TRAFFIC_OVERRIDE));
2510 if (current_link_up != netif_carrier_ok(tp->dev)) {
2511 if (current_link_up)
2512 netif_carrier_on(tp->dev);
2514 netif_carrier_off(tp->dev);
2515 tg3_link_report(tp);
2518 now_pause_cfg = tp->tg3_flags & (TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
2520 if (orig_pause_cfg != now_pause_cfg ||
2521 orig_active_speed != tp->link_config.active_speed ||
2522 orig_active_duplex != tp->link_config.active_duplex)
2523 tg3_link_report(tp);
2529 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2531 int current_link_up, err = 0;
2536 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2537 tw32_f(MAC_MODE, tp->mac_mode);
2543 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2544 MAC_STATUS_CFG_CHANGED |
2545 MAC_STATUS_MI_COMPLETION |
2546 MAC_STATUS_LNKSTATE_CHANGED));
2552 current_link_up = 0;
2553 current_speed = SPEED_INVALID;
2554 current_duplex = DUPLEX_INVALID;
2556 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2557 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2559 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2561 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2562 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2563 /* do nothing, just check for link up at the end */
2564 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2567 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2568 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2569 ADVERTISE_1000XPAUSE |
2570 ADVERTISE_1000XPSE_ASYM | ADVERTISE_SLCT);
2573 /* Always advertise symmetric PAUSE just like copper */
2574 new_adv |= ADVERTISE_1000XPAUSE;
2576 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2577 new_adv |= ADVERTISE_1000XHALF;
2578 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2579 new_adv |= ADVERTISE_1000XFULL;
2581 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2582 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2583 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2584 tg3_writephy(tp, MII_BMCR, bmcr);
2586 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2587 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2588 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2595 bmcr &= ~BMCR_SPEED1000;
2596 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2598 if (tp->link_config.duplex == DUPLEX_FULL)
2599 new_bmcr |= BMCR_FULLDPLX;
2601 if (new_bmcr != bmcr) {
2602 /* BMCR_SPEED1000 is a reserved bit that needs
2603 * to be set on write. */
2605 new_bmcr |= BMCR_SPEED1000;
2607 /* Force a linkdown */
2608 if (netif_carrier_ok(tp->dev)) {
2611 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2612 adv &= ~(ADVERTISE_1000XFULL |
2613 ADVERTISE_1000XHALF | ADVERTISE_SLCT);
2615 tg3_writephy(tp, MII_ADVERTISE, adv);
2616 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
2620 netif_carrier_off(tp->dev);
2622 tg3_writephy(tp, MII_BMCR, new_bmcr);
2624 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2625 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2626 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2630 if (bmsr & BMSR_LSTATUS) {
2631 current_speed = SPEED_1000;
2632 current_link_up = 1;
2633 if (bmcr & BMCR_FULLDPLX)
2634 current_duplex = DUPLEX_FULL;
2636 current_duplex = DUPLEX_HALF;
2638 if (bmcr & BMCR_ANENABLE) {
2639 u32 local_adv, remote_adv, common;
2641 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
2642 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
2643 common = local_adv & remote_adv;
2644 if (common & (ADVERTISE_1000XHALF |
2645 ADVERTISE_1000XFULL)) {
2646 if (common & ADVERTISE_1000XFULL)
2647 current_duplex = DUPLEX_FULL;
2649 current_duplex = DUPLEX_HALF;
2651 tg3_setup_flow_control(tp, local_adv,
2655 current_link_up = 0;
2659 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2660 if (tp->link_config.active_duplex == DUPLEX_HALF)
2661 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2663 tw32_f(MAC_MODE, tp->mac_mode);
2666 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2668 tp->link_config.active_speed = current_speed;
2669 tp->link_config.active_duplex = current_duplex;
2671 if (current_link_up != netif_carrier_ok(tp->dev)) {
2672 if (current_link_up)
2673 netif_carrier_on(tp->dev);
2675 netif_carrier_off(tp->dev);
2676 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2678 tg3_link_report(tp);
2683 static void tg3_serdes_parallel_detect(struct tg3 *tp)
2685 if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
2686 /* Give autoneg time to complete. */
2687 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2690 if (!netif_carrier_ok(tp->dev) &&
2691 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2694 tg3_readphy(tp, MII_BMCR, &bmcr);
2695 if (bmcr & BMCR_ANENABLE) {
2698 /* Select shadow register 0x1f */
2699 tg3_writephy(tp, 0x1c, 0x7c00);
2700 tg3_readphy(tp, 0x1c, &phy1);
2702 /* Select expansion interrupt status register */
2703 tg3_writephy(tp, 0x17, 0x0f01);
2704 tg3_readphy(tp, 0x15, &phy2);
2705 tg3_readphy(tp, 0x15, &phy2);
2707 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
2708 /* We have signal detect and not receiving
2709 * config code words, link is up by parallel detection. */
2713 bmcr &= ~BMCR_ANENABLE;
2714 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
2715 tg3_writephy(tp, MII_BMCR, bmcr);
2716 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
2720 else if (netif_carrier_ok(tp->dev) &&
2721 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
2722 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2725 /* Select expansion interrupt status register */
2726 tg3_writephy(tp, 0x17, 0x0f01);
2727 tg3_readphy(tp, 0x15, &phy2);
2731 /* Config code words received, turn on autoneg. */
2732 tg3_readphy(tp, MII_BMCR, &bmcr);
2733 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
2735 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2741 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2745 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2746 err = tg3_setup_fiber_phy(tp, force_reset);
2747 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2748 err = tg3_setup_fiber_mii_phy(tp, force_reset);
2750 err = tg3_setup_copper_phy(tp, force_reset);
2753 if (tp->link_config.active_speed == SPEED_1000 &&
2754 tp->link_config.active_duplex == DUPLEX_HALF)
2755 tw32(MAC_TX_LENGTHS,
2756 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2757 (6 << TX_LENGTHS_IPG_SHIFT) |
2758 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2760 tw32(MAC_TX_LENGTHS,
2761 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2762 (6 << TX_LENGTHS_IPG_SHIFT) |
2763 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2765 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2766 if (netif_carrier_ok(tp->dev)) {
2767 tw32(HOSTCC_STAT_COAL_TICKS,
2768 tp->coal.stats_block_coalesce_usecs);
2770 tw32(HOSTCC_STAT_COAL_TICKS, 0);
2777 /* Tigon3 never reports partial packet sends. So we do not
2778 * need special logic to handle SKBs that have not had all
2779 * of their frags sent yet, like SunGEM does. */
2781 static void tg3_tx(struct tg3 *tp)
2783 u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2784 u32 sw_idx = tp->tx_cons;
2786 while (sw_idx != hw_idx) {
2787 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2788 struct sk_buff *skb = ri->skb;
2791 if (unlikely(skb == NULL))
2794 pci_unmap_single(tp->pdev,
2795 pci_unmap_addr(ri, mapping), skb_headlen(skb), PCI_DMA_TODEVICE);
2801 sw_idx = NEXT_TX(sw_idx);
2803 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2804 if (unlikely(sw_idx == hw_idx))
2807 ri = &tp->tx_buffers[sw_idx];
2808 if (unlikely(ri->skb != NULL))
2811 pci_unmap_page(tp->pdev,
2812 pci_unmap_addr(ri, mapping),
2813 skb_shinfo(skb)->frags[i].size, PCI_DMA_TODEVICE);
2816 sw_idx = NEXT_TX(sw_idx);
2822 tp->tx_cons = sw_idx;
2824 if (netif_queue_stopped(tp->dev) &&
2825 (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2826 netif_wake_queue(tp->dev);
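/* The ring walk above relies on TG3_TX_RING_SIZE being a power of two, so
 * NEXT_TX() can wrap an index with a mask rather than a divide. A minimal
 * sketch of that idiom (hypothetical helper name, same behavior assumed):
 */
static inline u32 tg3_next_tx_example(u32 idx)
{
	/* (idx + 1) modulo the ring size, computed without division. */
	return (idx + 1) & (TG3_TX_RING_SIZE - 1);
}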
2829 /* Returns size of skb allocated or < 0 on error.
2831 * We only need to fill in the address because the other members
2832 * of the RX descriptor are invariant, see tg3_init_rings.
2834 * Note the purposeful asymmetry of cpu vs. chip accesses. For
2835 * posting buffers we only dirty the first cache line of the RX
2836 * descriptor (containing the address). Whereas for the RX status
2837 * buffers the cpu only reads the last cacheline of the RX descriptor
2838 * (to fetch the error flags, vlan tag, checksum, and opaque cookie). */
2840 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2841 int src_idx, u32 dest_idx_unmasked)
2843 struct tg3_rx_buffer_desc *desc;
2844 struct ring_info *map, *src_map;
2845 struct sk_buff *skb;
2847 int skb_size, dest_idx;
2850 switch (opaque_key) {
2851 case RXD_OPAQUE_RING_STD:
2852 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2853 desc = &tp->rx_std[dest_idx];
2854 map = &tp->rx_std_buffers[dest_idx];
2856 src_map = &tp->rx_std_buffers[src_idx];
2857 skb_size = tp->rx_pkt_buf_sz;
2860 case RXD_OPAQUE_RING_JUMBO:
2861 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2862 desc = &tp->rx_jumbo[dest_idx];
2863 map = &tp->rx_jumbo_buffers[dest_idx];
2865 src_map = &tp->rx_jumbo_buffers[src_idx];
2866 skb_size = RX_JUMBO_PKT_BUF_SZ;
2873 /* Do not overwrite any of the map or rp information
2874 * until we are sure we can commit to a new buffer.
2876 * Callers depend upon this behavior and assume that
2877 * we leave everything unchanged if we fail. */
2879 skb = dev_alloc_skb(skb_size);
2884 skb_reserve(skb, tp->rx_offset);
2886 mapping = pci_map_single(tp->pdev, skb->data,
2887 skb_size - tp->rx_offset,
2888 PCI_DMA_FROMDEVICE);
2890 map->skb = skb;
2891 pci_unmap_addr_set(map, mapping, mapping);
2893 if (src_map != NULL)
2894 src_map->skb = NULL;
2896 desc->addr_hi = ((u64)mapping >> 32);
2897 desc->addr_lo = ((u64)mapping & 0xffffffff);
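/* A minimal sketch (hypothetical helper, not in the original source) of the
 * posting invariant described above tg3_alloc_rx_skb(): a fresh buffer only
 * needs the two address words of its descriptor rewritten; every other
 * descriptor field is left untouched, so only one cache line is dirtied.
 */
static inline void tg3_set_rx_addr_example(struct tg3_rx_buffer_desc *desc,
					   dma_addr_t mapping)
{
	desc->addr_hi = ((u64) mapping >> 32);		/* upper 32 bits */
	desc->addr_lo = ((u64) mapping & 0xffffffff);	/* lower 32 bits */
}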
2902 /* We only need to move over the address because the other
2903 * members of the RX descriptor are invariant. See notes above
2904 * tg3_alloc_rx_skb for full details. */
2906 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
2907 int src_idx, u32 dest_idx_unmasked)
2909 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
2910 struct ring_info *src_map, *dest_map;
2913 switch (opaque_key) {
2914 case RXD_OPAQUE_RING_STD:
2915 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2916 dest_desc = &tp->rx_std[dest_idx];
2917 dest_map = &tp->rx_std_buffers[dest_idx];
2918 src_desc = &tp->rx_std[src_idx];
2919 src_map = &tp->rx_std_buffers[src_idx];
2922 case RXD_OPAQUE_RING_JUMBO:
2923 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2924 dest_desc = &tp->rx_jumbo[dest_idx];
2925 dest_map = &tp->rx_jumbo_buffers[dest_idx];
2926 src_desc = &tp->rx_jumbo[src_idx];
2927 src_map = &tp->rx_jumbo_buffers[src_idx];
2934 dest_map->skb = src_map->skb;
2935 pci_unmap_addr_set(dest_map, mapping,
2936 pci_unmap_addr(src_map, mapping));
2937 dest_desc->addr_hi = src_desc->addr_hi;
2938 dest_desc->addr_lo = src_desc->addr_lo;
2940 src_map->skb = NULL;
2943 #if TG3_VLAN_TAG_USED
2944 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
2946 return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
2950 /* The RX ring scheme is composed of multiple rings which post fresh
2951 * buffers to the chip, and one special ring the chip uses to report
2952 * status back to the host.
2954 * The special ring reports the status of received packets to the
2955 * host. The chip does not write into the original descriptor the
2956 * RX buffer was obtained from. The chip simply takes the original
2957 * descriptor as provided by the host, updates the status and length
2958 * field, then writes this into the next status ring entry.
2960 * Each ring the host uses to post buffers to the chip is described
2961 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
2962 * it is first placed into the on-chip ram. When the packet's length
2963 * is known, it walks down the TG3_BDINFO entries to select the ring.
2964 * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
2965 * whose MAXLEN covers the new packet's length is chosen.
2967 * The "separate ring for rx status" scheme may sound queer, but it makes
2968 * sense from a cache coherency perspective. If only the host writes
2969 * to the buffer post rings, and only the chip writes to the rx status
2970 * rings, then cache lines never move beyond shared-modified state.
2971 * If both the host and chip were to write into the same ring, cache line
2972 * eviction could occur since both entities want it in an exclusive state. */
2974 static int tg3_rx(struct tg3 *tp, int budget)
2977 u32 sw_idx = tp->rx_rcb_ptr;
2981 hw_idx = tp->hw_status->idx[0].rx_producer;
2983 /* We need to order the read of hw_idx and the read of
2984 * the opaque cookie. */
2989 while (sw_idx != hw_idx && budget > 0) {
2990 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
2992 struct sk_buff *skb;
2993 dma_addr_t dma_addr;
2994 u32 opaque_key, desc_idx, *post_ptr;
2996 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
2997 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
2998 if (opaque_key == RXD_OPAQUE_RING_STD) {
2999 dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
3001 skb = tp->rx_std_buffers[desc_idx].skb;
3002 post_ptr = &tp->rx_std_ptr;
3003 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3004 dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx], mapping);
3006 skb = tp->rx_jumbo_buffers[desc_idx].skb;
3007 post_ptr = &tp->rx_jumbo_ptr;
3010 goto next_pkt_nopost;
3013 work_mask |= opaque_key;
3015 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3016 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3018 tg3_recycle_rx(tp, opaque_key,
3019 desc_idx, *post_ptr);
3020 drop_it_no_recycle:
3021 /* Other statistics kept track of by card. */
3022 tp->net_stats.rx_dropped++;
3026 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3028 if (len > RX_COPY_THRESHOLD
3029 && tp->rx_offset == 2
3030 /* rx_offset != 2 iff this is a 5701 card running
3031 * in PCI-X mode [see tg3_get_invariants()] */
3032 ) {
3033 int skb_size;
3035 skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3036 desc_idx, *post_ptr);
3040 pci_unmap_single(tp->pdev, dma_addr,
3041 skb_size - tp->rx_offset,
3042 PCI_DMA_FROMDEVICE);
3046 struct sk_buff *copy_skb;
3048 tg3_recycle_rx(tp, opaque_key,
3049 desc_idx, *post_ptr);
3051 copy_skb = dev_alloc_skb(len + 2);
3052 if (copy_skb == NULL)
3053 goto drop_it_no_recycle;
3055 copy_skb->dev = tp->dev;
3056 skb_reserve(copy_skb, 2);
3057 skb_put(copy_skb, len);
3058 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3059 memcpy(copy_skb->data, skb->data, len);
3060 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3062 /* We'll reuse the original ring buffer. */
3066 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3067 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3068 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3069 >> RXD_TCPCSUM_SHIFT) == 0xffff))
3070 skb->ip_summed = CHECKSUM_UNNECESSARY;
3072 skb->ip_summed = CHECKSUM_NONE;
3074 skb->protocol = eth_type_trans(skb, tp->dev);
3075 #if TG3_VLAN_TAG_USED
3076 if (tp->vlgrp != NULL &&
3077 desc->type_flags & RXD_FLAG_VLAN) {
3078 tg3_vlan_rx(tp, skb,
3079 desc->err_vlan & RXD_VLAN_MASK);
3082 netif_receive_skb(skb);
3084 tp->dev->last_rx = jiffies;
3092 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
3094 /* Refresh hw_idx to see if there is new work */
3095 if (sw_idx == hw_idx) {
3096 hw_idx = tp->hw_status->idx[0].rx_producer;
3101 /* ACK the status ring. */
3102 tp->rx_rcb_ptr = sw_idx;
3103 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
3105 /* Refill RX ring(s). */
3106 if (work_mask & RXD_OPAQUE_RING_STD) {
3107 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3108 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW, sw_idx);
3111 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3112 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3113 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW, sw_idx);
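/* Illustrative sketch of the MAXLEN selection rule described in the comment
 * above tg3_rx(): the chip walks the TG3_BDINFO entries and uses the first
 * posting ring whose MAXLEN covers the packet. The struct and helper here
 * are hypothetical, host-side illustrations only, not chip firmware.
 */
struct tg3_bdinfo_example {
	u32 maxlen;		/* largest payload this ring can hold */
};

static int tg3_pick_ring_example(const struct tg3_bdinfo_example *bd,
				 int nrings, u32 pkt_len)
{
	int i;

	for (i = 0; i < nrings; i++)
		if (pkt_len <= bd[i].maxlen)
			return i;	/* first ring that fits wins */
	return -1;			/* no posting ring can hold it */
}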
3121 static int tg3_poll(struct net_device *netdev, int *budget)
3123 struct tg3 *tp = netdev_priv(netdev);
3124 struct tg3_hw_status *sblk = tp->hw_status;
3127 /* handle link change and other phy events */
3128 if (!(tp->tg3_flags &
3129 (TG3_FLAG_USE_LINKCHG_REG |
3130 TG3_FLAG_POLL_SERDES))) {
3131 if (sblk->status & SD_STATUS_LINK_CHG) {
3132 sblk->status = SD_STATUS_UPDATED |
3133 (sblk->status & ~SD_STATUS_LINK_CHG);
3134 spin_lock(&tp->lock);
3135 tg3_setup_phy(tp, 0);
3136 spin_unlock(&tp->lock);
3140 /* run TX completion thread */
3141 if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3142 spin_lock(&tp->tx_lock);
3144 spin_unlock(&tp->tx_lock);
3147 /* run RX thread, within the bounds set by NAPI.
3148 * All RX "locking" is done by ensuring outside
3149 * code synchronizes with dev->poll() */
3151 if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
3152 int orig_budget = *budget;
3155 if (orig_budget > netdev->quota)
3156 orig_budget = netdev->quota;
3158 work_done = tg3_rx(tp, orig_budget);
3160 *budget -= work_done;
3161 netdev->quota -= work_done;
3164 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
3165 tp->last_tag = sblk->status_tag;
3167 sblk->status &= ~SD_STATUS_UPDATED;
3169 /* if no more work, tell net stack and NIC we're done */
3170 done = !tg3_has_work(tp);
3171 if (done) {
3172 spin_lock(&tp->lock);
3173 netif_rx_complete(netdev);
3174 tg3_restart_ints(tp);
3175 spin_unlock(&tp->lock);
3178 return (done ? 0 : 1);
3181 static void tg3_irq_quiesce(struct tg3 *tp)
3183 BUG_ON(tp->irq_sync);
3185 tp->irq_sync = 1;
3186 smp_mb();
3188 synchronize_irq(tp->pdev->irq);
3191 static inline int tg3_irq_sync(struct tg3 *tp)
3193 return tp->irq_sync;
3196 /* Fully shut down all tg3 driver activity elsewhere in the system.
3197 * If irq_sync is non-zero, then the IRQ handler must be synchronized
3198 * as well. Most of the time, this is not necessary except when
3199 * shutting down the device. */
3201 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3203 if (irq_sync)
3204 tg3_irq_quiesce(tp);
3205 spin_lock_bh(&tp->lock);
3206 spin_lock(&tp->tx_lock);
3209 static inline void tg3_full_unlock(struct tg3 *tp)
3211 spin_unlock(&tp->tx_lock);
3212 spin_unlock_bh(&tp->lock);
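/* Usage sketch for the helpers above; this function is a hypothetical
 * example, not part of the driver. Paths that reconfigure the chip take
 * the full lock, passing irq_sync != 0 when the interrupt handler must be
 * quiesced as well (e.g. on shutdown).
 */
static void tg3_reconfig_example(struct tg3 *tp)
{
	tg3_full_lock(tp, 1);	/* non-zero: also wait out the ISR */
	/* ... rewrite registers or state the ISR must not race with ... */
	tg3_full_unlock(tp);
}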
3215 /* MSI ISR - No need to check for interrupt sharing and no need to
3216 * flush status block and interrupt mailbox. PCI ordering rules
3217 * guarantee that MSI will arrive after the status block. */
3219 static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
3221 struct net_device *dev = dev_id;