1 /************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3 * Copyright(c) 2002-2005 Neterion Inc.
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
18 * questions regaring the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
21 * Francois Romieu : For pointing out all code part that were
22 * deprecated and also styling related comments.
23 * Grant Grundler : For helping me get rid of some Architecture
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
27 * The module loadable parameters that are supported by the driver and a brief
28 * explaination of all the variables.
30 * rx_ring_num : This can be used to program the number of receive rings used
32 * rx_ring_sz: This defines the number of receive blocks each ring can have.
33 * This is also an array of size 8.
34 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35 * values are 1, 2 and 3.
36 * tx_fifo_num: This defines the number of Tx FIFOs thats used int the driver.
37 * tx_fifo_len: This too is an array of 8. Each element defines the number of
38 * Tx descriptors that can be associated with each corresponding FIFO.
39 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40 * 1(MSI), 2(MSI_X). Default value is '0(INTA)'
41 * lro: Specifies whether to enable Large Receive Offload (LRO) or not.
42 * Possible values '1' for enable '0' for disable. Default is '0'
43 * lro_max_pkts: This parameter defines maximum number of packets can be
44 * aggregated as a single large packet
45 * napi: This parameter used to enable/disable NAPI (polling Rx)
46 * Possible values '1' for enable and '0' for disable. Default is '1'
47 * ufo: This parameter used to enable/disable UDP Fragmentation Offload(UFO)
48 * Possible values '1' for enable and '0' for disable. Default is '0'
49 * vlan_tag_strip: This can be used to enable or disable vlan stripping.
50 * Possible values '1' for enable , '0' for disable.
51 * Default is '2' - which means disable in promisc mode
52 * and enable in non-promiscuous mode.
53 ************************************************************************/
55 #include <linux/module.h>
56 #include <linux/types.h>
57 #include <linux/errno.h>
58 #include <linux/ioport.h>
59 #include <linux/pci.h>
60 #include <linux/dma-mapping.h>
61 #include <linux/kernel.h>
62 #include <linux/netdevice.h>
63 #include <linux/etherdevice.h>
64 #include <linux/skbuff.h>
65 #include <linux/init.h>
66 #include <linux/delay.h>
67 #include <linux/stddef.h>
68 #include <linux/ioctl.h>
69 #include <linux/timex.h>
70 #include <linux/ethtool.h>
71 #include <linux/workqueue.h>
72 #include <linux/if_vlan.h>
74 #include <linux/tcp.h>
77 #include <asm/system.h>
78 #include <asm/uaccess.h>
80 #include <asm/div64.h>
85 #include "s2io-regs.h"
87 #define DRV_VERSION "2.0.17.1"
89 /* S2io Driver name & version. */
90 static char s2io_driver_name[] = "Neterion";
91 static char s2io_driver_version[] = DRV_VERSION;
93 static int rxd_size[4] = {32,48,48,64};
94 static int rxd_count[4] = {127,85,85,63};
96 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
100 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
101 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
107 * Cards with following subsystem_id have a link state indication
108 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
109 * macro below identifies these cards given the subsystem_id.
111 #define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
112 (dev_type == XFRAME_I_DEVICE) ? \
113 ((((subid >= 0x600B) && (subid <= 0x600D)) || \
114 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
116 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
117 ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
118 #define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
121 static inline int rx_buffer_level(struct s2io_nic * sp, int rxb_size, int ring)
123 struct mac_info *mac_control;
125 mac_control = &sp->mac_control;
126 if (rxb_size <= rxd_count[sp->rxd_mode])
128 else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16)
133 /* Ethtool related variables and Macros. */
134 static char s2io_gstrings[][ETH_GSTRING_LEN] = {
135 "Register test\t(offline)",
136 "Eeprom test\t(offline)",
137 "Link test\t(online)",
138 "RLDRAM test\t(offline)",
139 "BIST Test\t(offline)"
142 static char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
144 {"tmac_data_octets"},
148 {"tmac_pause_ctrl_frms"},
152 {"tmac_any_err_frms"},
153 {"tmac_ttl_less_fb_octets"},
154 {"tmac_vld_ip_octets"},
162 {"rmac_data_octets"},
163 {"rmac_fcs_err_frms"},
165 {"rmac_vld_mcst_frms"},
166 {"rmac_vld_bcst_frms"},
167 {"rmac_in_rng_len_err_frms"},
168 {"rmac_out_rng_len_err_frms"},
170 {"rmac_pause_ctrl_frms"},
171 {"rmac_unsup_ctrl_frms"},
173 {"rmac_accepted_ucst_frms"},
174 {"rmac_accepted_nucst_frms"},
175 {"rmac_discarded_frms"},
176 {"rmac_drop_events"},
177 {"rmac_ttl_less_fb_octets"},
179 {"rmac_usized_frms"},
180 {"rmac_osized_frms"},
182 {"rmac_jabber_frms"},
183 {"rmac_ttl_64_frms"},
184 {"rmac_ttl_65_127_frms"},
185 {"rmac_ttl_128_255_frms"},
186 {"rmac_ttl_256_511_frms"},
187 {"rmac_ttl_512_1023_frms"},
188 {"rmac_ttl_1024_1518_frms"},
196 {"rmac_err_drp_udp"},
197 {"rmac_xgmii_err_sym"},
215 {"rmac_xgmii_data_err_cnt"},
216 {"rmac_xgmii_ctrl_err_cnt"},
217 {"rmac_accepted_ip"},
221 {"new_rd_req_rtry_cnt"},
223 {"wr_rtry_rd_ack_cnt"},
226 {"new_wr_req_rtry_cnt"},
229 {"rd_rtry_wr_ack_cnt"},
239 static char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
240 {"rmac_ttl_1519_4095_frms"},
241 {"rmac_ttl_4096_8191_frms"},
242 {"rmac_ttl_8192_max_frms"},
243 {"rmac_ttl_gt_max_frms"},
244 {"rmac_osized_alt_frms"},
245 {"rmac_jabber_alt_frms"},
246 {"rmac_gt_max_alt_frms"},
248 {"rmac_len_discard"},
249 {"rmac_fcs_discard"},
252 {"rmac_red_discard"},
253 {"rmac_rts_discard"},
254 {"rmac_ingm_full_discard"},
258 static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
259 {"\n DRIVER STATISTICS"},
260 {"single_bit_ecc_errs"},
261 {"double_bit_ecc_errs"},
267 ("alarm_transceiver_temp_high"),
268 ("alarm_transceiver_temp_low"),
269 ("alarm_laser_bias_current_high"),
270 ("alarm_laser_bias_current_low"),
271 ("alarm_laser_output_power_high"),
272 ("alarm_laser_output_power_low"),
273 ("warn_transceiver_temp_high"),
274 ("warn_transceiver_temp_low"),
275 ("warn_laser_bias_current_high"),
276 ("warn_laser_bias_current_low"),
277 ("warn_laser_output_power_high"),
278 ("warn_laser_output_power_low"),
279 ("lro_aggregated_pkts"),
280 ("lro_flush_both_count"),
281 ("lro_out_of_sequence_pkts"),
282 ("lro_flush_due_to_max_pkts"),
283 ("lro_avg_aggr_pkts"),
286 #define S2IO_XENA_STAT_LEN sizeof(ethtool_xena_stats_keys)/ ETH_GSTRING_LEN
287 #define S2IO_ENHANCED_STAT_LEN sizeof(ethtool_enhanced_stats_keys)/ \
289 #define S2IO_DRIVER_STAT_LEN sizeof(ethtool_driver_stats_keys)/ ETH_GSTRING_LEN
291 #define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN )
292 #define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN )
294 #define XFRAME_I_STAT_STRINGS_LEN ( XFRAME_I_STAT_LEN * ETH_GSTRING_LEN )
295 #define XFRAME_II_STAT_STRINGS_LEN ( XFRAME_II_STAT_LEN * ETH_GSTRING_LEN )
297 #define S2IO_TEST_LEN sizeof(s2io_gstrings) / ETH_GSTRING_LEN
298 #define S2IO_STRINGS_LEN S2IO_TEST_LEN * ETH_GSTRING_LEN
300 #define S2IO_TIMER_CONF(timer, handle, arg, exp) \
301 init_timer(&timer); \
302 timer.function = handle; \
303 timer.data = (unsigned long) arg; \
304 mod_timer(&timer, (jiffies + exp)) \
307 static void s2io_vlan_rx_register(struct net_device *dev,
308 struct vlan_group *grp)
310 struct s2io_nic *nic = dev->priv;
313 spin_lock_irqsave(&nic->tx_lock, flags);
315 spin_unlock_irqrestore(&nic->tx_lock, flags);
318 /* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not */
321 /* Unregister the vlan */
322 static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
324 struct s2io_nic *nic = dev->priv;
327 spin_lock_irqsave(&nic->tx_lock, flags);
329 nic->vlgrp->vlan_devices[vid] = NULL;
330 spin_unlock_irqrestore(&nic->tx_lock, flags);
334 * Constants to be programmed into the Xena's registers, to configure
339 static const u64 herc_act_dtx_cfg[] = {
341 0x8000051536750000ULL, 0x80000515367500E0ULL,
343 0x8000051536750004ULL, 0x80000515367500E4ULL,
345 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
347 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
349 0x801205150D440000ULL, 0x801205150D4400E0ULL,
351 0x801205150D440004ULL, 0x801205150D4400E4ULL,
353 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
355 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
360 static const u64 xena_dtx_cfg[] = {
362 0x8000051500000000ULL, 0x80000515000000E0ULL,
364 0x80000515D9350004ULL, 0x80000515D93500E4ULL,
366 0x8001051500000000ULL, 0x80010515000000E0ULL,
368 0x80010515001E0004ULL, 0x80010515001E00E4ULL,
370 0x8002051500000000ULL, 0x80020515000000E0ULL,
372 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
377 * Constants for Fixing the MacAddress problem seen mostly on
380 static const u64 fix_mac[] = {
381 0x0060000000000000ULL, 0x0060600000000000ULL,
382 0x0040600000000000ULL, 0x0000600000000000ULL,
383 0x0020600000000000ULL, 0x0060600000000000ULL,
384 0x0020600000000000ULL, 0x0060600000000000ULL,
385 0x0020600000000000ULL, 0x0060600000000000ULL,
386 0x0020600000000000ULL, 0x0060600000000000ULL,
387 0x0020600000000000ULL, 0x0060600000000000ULL,
388 0x0020600000000000ULL, 0x0060600000000000ULL,
389 0x0020600000000000ULL, 0x0060600000000000ULL,
390 0x0020600000000000ULL, 0x0060600000000000ULL,
391 0x0020600000000000ULL, 0x0060600000000000ULL,
392 0x0020600000000000ULL, 0x0060600000000000ULL,
393 0x0020600000000000ULL, 0x0000600000000000ULL,
394 0x0040600000000000ULL, 0x0060600000000000ULL,
398 MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
399 MODULE_LICENSE("GPL");
400 MODULE_VERSION(DRV_VERSION);
403 /* Module Loadable parameters. */
404 S2IO_PARM_INT(tx_fifo_num, 1);
405 S2IO_PARM_INT(rx_ring_num, 1);
408 S2IO_PARM_INT(rx_ring_mode, 1);
409 S2IO_PARM_INT(use_continuous_tx_intrs, 1);
410 S2IO_PARM_INT(rmac_pause_time, 0x100);
411 S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
412 S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
413 S2IO_PARM_INT(shared_splits, 0);
414 S2IO_PARM_INT(tmac_util_period, 5);
415 S2IO_PARM_INT(rmac_util_period, 5);
416 S2IO_PARM_INT(bimodal, 0);
417 S2IO_PARM_INT(l3l4hdr_size, 128);
418 /* Frequency of Rx desc syncs expressed as power of 2 */
419 S2IO_PARM_INT(rxsync_frequency, 3);
420 /* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */
421 S2IO_PARM_INT(intr_type, 0);
422 /* Large receive offload feature */
423 S2IO_PARM_INT(lro, 0);
424 /* Max pkts to be aggregated by LRO at one time. If not specified,
425 * aggregation happens until we hit max IP pkt size(64K)
427 S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
428 S2IO_PARM_INT(indicate_max_pkts, 0);
430 S2IO_PARM_INT(napi, 1);
431 S2IO_PARM_INT(ufo, 0);
432 S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
434 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
435 {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
436 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
437 {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
438 static unsigned int rts_frm_len[MAX_RX_RINGS] =
439 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
441 module_param_array(tx_fifo_len, uint, NULL, 0);
442 module_param_array(rx_ring_sz, uint, NULL, 0);
443 module_param_array(rts_frm_len, uint, NULL, 0);
447 * This table lists all the devices that this driver supports.
449 static struct pci_device_id s2io_tbl[] __devinitdata = {
450 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
451 PCI_ANY_ID, PCI_ANY_ID},
452 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
453 PCI_ANY_ID, PCI_ANY_ID},
454 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
455 PCI_ANY_ID, PCI_ANY_ID},
456 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
457 PCI_ANY_ID, PCI_ANY_ID},
461 MODULE_DEVICE_TABLE(pci, s2io_tbl);
463 static struct pci_driver s2io_driver = {
465 .id_table = s2io_tbl,
466 .probe = s2io_init_nic,
467 .remove = __devexit_p(s2io_rem_nic),
470 /* A simplifier macro used both by init and free shared_mem Fns(). */
471 #define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
474 * init_shared_mem - Allocation and Initialization of Memory
475 * @nic: Device private variable.
476 * Description: The function allocates all the memory areas shared
477 * between the NIC and the driver. This includes Tx descriptors,
478 * Rx descriptors and the statistics block.
481 static int init_shared_mem(struct s2io_nic *nic)
484 void *tmp_v_addr, *tmp_v_addr_next;
485 dma_addr_t tmp_p_addr, tmp_p_addr_next;
486 struct RxD_block *pre_rxd_blk = NULL;
488 int lst_size, lst_per_page;
489 struct net_device *dev = nic->dev;
493 struct mac_info *mac_control;
494 struct config_param *config;
496 mac_control = &nic->mac_control;
497 config = &nic->config;
500 /* Allocation and initialization of TXDLs in FIOFs */
502 for (i = 0; i < config->tx_fifo_num; i++) {
503 size += config->tx_cfg[i].fifo_len;
505 if (size > MAX_AVAILABLE_TXDS) {
506 DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
507 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
511 lst_size = (sizeof(struct TxD) * config->max_txds);
512 lst_per_page = PAGE_SIZE / lst_size;
514 for (i = 0; i < config->tx_fifo_num; i++) {
515 int fifo_len = config->tx_cfg[i].fifo_len;
516 int list_holder_size = fifo_len * sizeof(struct list_info_hold);
517 mac_control->fifos[i].list_info = kmalloc(list_holder_size,
519 if (!mac_control->fifos[i].list_info) {
521 "Malloc failed for list_info\n");
524 memset(mac_control->fifos[i].list_info, 0, list_holder_size);
526 for (i = 0; i < config->tx_fifo_num; i++) {
527 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
529 mac_control->fifos[i].tx_curr_put_info.offset = 0;
530 mac_control->fifos[i].tx_curr_put_info.fifo_len =
531 config->tx_cfg[i].fifo_len - 1;
532 mac_control->fifos[i].tx_curr_get_info.offset = 0;
533 mac_control->fifos[i].tx_curr_get_info.fifo_len =
534 config->tx_cfg[i].fifo_len - 1;
535 mac_control->fifos[i].fifo_no = i;
536 mac_control->fifos[i].nic = nic;
537 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
539 for (j = 0; j < page_num; j++) {
543 tmp_v = pci_alloc_consistent(nic->pdev,
547 "pci_alloc_consistent ");
548 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
551 /* If we got a zero DMA address(can happen on
552 * certain platforms like PPC), reallocate.
553 * Store virtual address of page we don't want,
557 mac_control->zerodma_virt_addr = tmp_v;
559 "%s: Zero DMA address for TxDL. ", dev->name);
561 "Virtual address %p\n", tmp_v);
562 tmp_v = pci_alloc_consistent(nic->pdev,
566 "pci_alloc_consistent ");
567 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
571 while (k < lst_per_page) {
572 int l = (j * lst_per_page) + k;
573 if (l == config->tx_cfg[i].fifo_len)
575 mac_control->fifos[i].list_info[l].list_virt_addr =
576 tmp_v + (k * lst_size);
577 mac_control->fifos[i].list_info[l].list_phy_addr =
578 tmp_p + (k * lst_size);
584 nic->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
585 if (!nic->ufo_in_band_v)
588 /* Allocation and initialization of RXDs in Rings */
590 for (i = 0; i < config->rx_ring_num; i++) {
591 if (config->rx_cfg[i].num_rxd %
592 (rxd_count[nic->rxd_mode] + 1)) {
593 DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
594 DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
596 DBG_PRINT(ERR_DBG, "RxDs per Block");
599 size += config->rx_cfg[i].num_rxd;
600 mac_control->rings[i].block_count =
601 config->rx_cfg[i].num_rxd /
602 (rxd_count[nic->rxd_mode] + 1 );
603 mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
604 mac_control->rings[i].block_count;
606 if (nic->rxd_mode == RXD_MODE_1)
607 size = (size * (sizeof(struct RxD1)));
609 size = (size * (sizeof(struct RxD3)));
611 for (i = 0; i < config->rx_ring_num; i++) {
612 mac_control->rings[i].rx_curr_get_info.block_index = 0;
613 mac_control->rings[i].rx_curr_get_info.offset = 0;
614 mac_control->rings[i].rx_curr_get_info.ring_len =
615 config->rx_cfg[i].num_rxd - 1;
616 mac_control->rings[i].rx_curr_put_info.block_index = 0;
617 mac_control->rings[i].rx_curr_put_info.offset = 0;
618 mac_control->rings[i].rx_curr_put_info.ring_len =
619 config->rx_cfg[i].num_rxd - 1;
620 mac_control->rings[i].nic = nic;
621 mac_control->rings[i].ring_no = i;
623 blk_cnt = config->rx_cfg[i].num_rxd /
624 (rxd_count[nic->rxd_mode] + 1);
625 /* Allocating all the Rx blocks */
626 for (j = 0; j < blk_cnt; j++) {
627 struct rx_block_info *rx_blocks;
630 rx_blocks = &mac_control->rings[i].rx_blocks[j];
631 size = SIZE_OF_BLOCK; //size is always page size
632 tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
634 if (tmp_v_addr == NULL) {
636 * In case of failure, free_shared_mem()
637 * is called, which should free any
638 * memory that was alloced till the
641 rx_blocks->block_virt_addr = tmp_v_addr;
644 memset(tmp_v_addr, 0, size);
645 rx_blocks->block_virt_addr = tmp_v_addr;
646 rx_blocks->block_dma_addr = tmp_p_addr;
647 rx_blocks->rxds = kmalloc(sizeof(struct rxd_info)*
648 rxd_count[nic->rxd_mode],
650 if (!rx_blocks->rxds)
652 for (l=0; l<rxd_count[nic->rxd_mode];l++) {
653 rx_blocks->rxds[l].virt_addr =
654 rx_blocks->block_virt_addr +
655 (rxd_size[nic->rxd_mode] * l);
656 rx_blocks->rxds[l].dma_addr =
657 rx_blocks->block_dma_addr +
658 (rxd_size[nic->rxd_mode] * l);
661 /* Interlinking all Rx Blocks */
662 for (j = 0; j < blk_cnt; j++) {
664 mac_control->rings[i].rx_blocks[j].block_virt_addr;
666 mac_control->rings[i].rx_blocks[(j + 1) %
667 blk_cnt].block_virt_addr;
669 mac_control->rings[i].rx_blocks[j].block_dma_addr;
671 mac_control->rings[i].rx_blocks[(j + 1) %
672 blk_cnt].block_dma_addr;
674 pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
675 pre_rxd_blk->reserved_2_pNext_RxD_block =
676 (unsigned long) tmp_v_addr_next;
677 pre_rxd_blk->pNext_RxD_Blk_physical =
678 (u64) tmp_p_addr_next;
681 if (nic->rxd_mode >= RXD_MODE_3A) {
683 * Allocation of Storages for buffer addresses in 2BUFF mode
684 * and the buffers as well.
686 for (i = 0; i < config->rx_ring_num; i++) {
687 blk_cnt = config->rx_cfg[i].num_rxd /
688 (rxd_count[nic->rxd_mode]+ 1);
689 mac_control->rings[i].ba =
690 kmalloc((sizeof(struct buffAdd *) * blk_cnt),
692 if (!mac_control->rings[i].ba)
694 for (j = 0; j < blk_cnt; j++) {
696 mac_control->rings[i].ba[j] =
697 kmalloc((sizeof(struct buffAdd) *
698 (rxd_count[nic->rxd_mode] + 1)),
700 if (!mac_control->rings[i].ba[j])
702 while (k != rxd_count[nic->rxd_mode]) {
703 ba = &mac_control->rings[i].ba[j][k];
705 ba->ba_0_org = (void *) kmalloc
706 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
709 tmp = (unsigned long)ba->ba_0_org;
711 tmp &= ~((unsigned long) ALIGN_SIZE);
712 ba->ba_0 = (void *) tmp;
714 ba->ba_1_org = (void *) kmalloc
715 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
718 tmp = (unsigned long) ba->ba_1_org;
720 tmp &= ~((unsigned long) ALIGN_SIZE);
721 ba->ba_1 = (void *) tmp;
728 /* Allocation and initialization of Statistics block */
729 size = sizeof(struct stat_block);
730 mac_control->stats_mem = pci_alloc_consistent
731 (nic->pdev, size, &mac_control->stats_mem_phy);
733 if (!mac_control->stats_mem) {
735 * In case of failure, free_shared_mem() is called, which
736 * should free any memory that was alloced till the
741 mac_control->stats_mem_sz = size;
743 tmp_v_addr = mac_control->stats_mem;
744 mac_control->stats_info = (struct stat_block *) tmp_v_addr;
745 memset(tmp_v_addr, 0, size);
746 DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
747 (unsigned long long) tmp_p_addr);
753 * free_shared_mem - Free the allocated Memory
754 * @nic: Device private variable.
755 * Description: This function is to free all memory locations allocated by
756 * the init_shared_mem() function and return it to the kernel.
759 static void free_shared_mem(struct s2io_nic *nic)
761 int i, j, blk_cnt, size;
763 dma_addr_t tmp_p_addr;
764 struct mac_info *mac_control;
765 struct config_param *config;
766 int lst_size, lst_per_page;
767 struct net_device *dev = nic->dev;
772 mac_control = &nic->mac_control;
773 config = &nic->config;
775 lst_size = (sizeof(struct TxD) * config->max_txds);
776 lst_per_page = PAGE_SIZE / lst_size;
778 for (i = 0; i < config->tx_fifo_num; i++) {
779 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
781 for (j = 0; j < page_num; j++) {
782 int mem_blks = (j * lst_per_page);
783 if (!mac_control->fifos[i].list_info)
785 if (!mac_control->fifos[i].list_info[mem_blks].
788 pci_free_consistent(nic->pdev, PAGE_SIZE,
789 mac_control->fifos[i].
792 mac_control->fifos[i].
796 /* If we got a zero DMA address during allocation,
799 if (mac_control->zerodma_virt_addr) {
800 pci_free_consistent(nic->pdev, PAGE_SIZE,
801 mac_control->zerodma_virt_addr,
804 "%s: Freeing TxDL with zero DMA addr. ",
806 DBG_PRINT(INIT_DBG, "Virtual address %p\n",
807 mac_control->zerodma_virt_addr);
809 kfree(mac_control->fifos[i].list_info);
812 size = SIZE_OF_BLOCK;
813 for (i = 0; i < config->rx_ring_num; i++) {
814 blk_cnt = mac_control->rings[i].block_count;
815 for (j = 0; j < blk_cnt; j++) {
816 tmp_v_addr = mac_control->rings[i].rx_blocks[j].
818 tmp_p_addr = mac_control->rings[i].rx_blocks[j].
820 if (tmp_v_addr == NULL)
822 pci_free_consistent(nic->pdev, size,
823 tmp_v_addr, tmp_p_addr);
824 kfree(mac_control->rings[i].rx_blocks[j].rxds);
828 if (nic->rxd_mode >= RXD_MODE_3A) {
829 /* Freeing buffer storage addresses in 2BUFF mode. */
830 for (i = 0; i < config->rx_ring_num; i++) {
831 blk_cnt = config->rx_cfg[i].num_rxd /
832 (rxd_count[nic->rxd_mode] + 1);
833 for (j = 0; j < blk_cnt; j++) {
835 if (!mac_control->rings[i].ba[j])
837 while (k != rxd_count[nic->rxd_mode]) {
839 &mac_control->rings[i].ba[j][k];
844 kfree(mac_control->rings[i].ba[j]);
846 kfree(mac_control->rings[i].ba);
850 if (mac_control->stats_mem) {
851 pci_free_consistent(nic->pdev,
852 mac_control->stats_mem_sz,
853 mac_control->stats_mem,
854 mac_control->stats_mem_phy);
856 if (nic->ufo_in_band_v)
857 kfree(nic->ufo_in_band_v);
861 * s2io_verify_pci_mode -
864 static int s2io_verify_pci_mode(struct s2io_nic *nic)
866 struct XENA_dev_config __iomem *bar0 = nic->bar0;
867 register u64 val64 = 0;
870 val64 = readq(&bar0->pci_mode);
871 mode = (u8)GET_PCI_MODE(val64);
873 if ( val64 & PCI_MODE_UNKNOWN_MODE)
874 return -1; /* Unknown PCI mode */
878 #define NEC_VENID 0x1033
879 #define NEC_DEVID 0x0125
880 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
882 struct pci_dev *tdev = NULL;
883 while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
884 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
885 if (tdev->bus == s2io_pdev->bus->parent)
893 static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
895 * s2io_print_pci_mode -
897 static int s2io_print_pci_mode(struct s2io_nic *nic)
899 struct XENA_dev_config __iomem *bar0 = nic->bar0;
900 register u64 val64 = 0;
902 struct config_param *config = &nic->config;
904 val64 = readq(&bar0->pci_mode);
905 mode = (u8)GET_PCI_MODE(val64);
907 if ( val64 & PCI_MODE_UNKNOWN_MODE)
908 return -1; /* Unknown PCI mode */
910 config->bus_speed = bus_speed[mode];
912 if (s2io_on_nec_bridge(nic->pdev)) {
913 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
918 if (val64 & PCI_MODE_32_BITS) {
919 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
921 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
925 case PCI_MODE_PCI_33:
926 DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
928 case PCI_MODE_PCI_66:
929 DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
931 case PCI_MODE_PCIX_M1_66:
932 DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
934 case PCI_MODE_PCIX_M1_100:
935 DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
937 case PCI_MODE_PCIX_M1_133:
938 DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
940 case PCI_MODE_PCIX_M2_66:
941 DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
943 case PCI_MODE_PCIX_M2_100:
944 DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
946 case PCI_MODE_PCIX_M2_133:
947 DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
950 return -1; /* Unsupported bus speed */
957 * init_nic - Initialization of hardware
958 * @nic: device peivate variable
959 * Description: The function sequentially configures every block
960 * of the H/W from their reset values.
961 * Return Value: SUCCESS on success and
962 * '-1' on failure (endian settings incorrect).
965 static int init_nic(struct s2io_nic *nic)
967 struct XENA_dev_config __iomem *bar0 = nic->bar0;
968 struct net_device *dev = nic->dev;
969 register u64 val64 = 0;
973 struct mac_info *mac_control;
974 struct config_param *config;
976 unsigned long long mem_share;
979 mac_control = &nic->mac_control;
980 config = &nic->config;
982 /* to set the swapper controle on the card */
983 if(s2io_set_swapper(nic)) {
984 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
989 * Herc requires EOI to be removed from reset before XGXS, so..
991 if (nic->device_type & XFRAME_II_DEVICE) {
992 val64 = 0xA500000000ULL;
993 writeq(val64, &bar0->sw_reset);
995 val64 = readq(&bar0->sw_reset);
998 /* Remove XGXS from reset state */
1000 writeq(val64, &bar0->sw_reset);
1002 val64 = readq(&bar0->sw_reset);
1004 /* Enable Receiving broadcasts */
1005 add = &bar0->mac_cfg;
1006 val64 = readq(&bar0->mac_cfg);
1007 val64 |= MAC_RMAC_BCAST_ENABLE;
1008 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1009 writel((u32) val64, add);
1010 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1011 writel((u32) (val64 >> 32), (add + 4));
1013 /* Read registers in all blocks */
1014 val64 = readq(&bar0->mac_int_mask);
1015 val64 = readq(&bar0->mc_int_mask);
1016 val64 = readq(&bar0->xgxs_int_mask);
1020 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1022 if (nic->device_type & XFRAME_II_DEVICE) {
1023 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1024 SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1025 &bar0->dtx_control, UF);
1027 msleep(1); /* Necessary!! */
1031 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1032 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1033 &bar0->dtx_control, UF);
1034 val64 = readq(&bar0->dtx_control);
1039 /* Tx DMA Initialization */
1041 writeq(val64, &bar0->tx_fifo_partition_0);
1042 writeq(val64, &bar0->tx_fifo_partition_1);
1043 writeq(val64, &bar0->tx_fifo_partition_2);
1044 writeq(val64, &bar0->tx_fifo_partition_3);
1047 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1049 vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
1050 13) | vBIT(config->tx_cfg[i].fifo_priority,
1053 if (i == (config->tx_fifo_num - 1)) {
1060 writeq(val64, &bar0->tx_fifo_partition_0);
1064 writeq(val64, &bar0->tx_fifo_partition_1);
1068 writeq(val64, &bar0->tx_fifo_partition_2);
1072 writeq(val64, &bar0->tx_fifo_partition_3);
1078 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1079 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1081 if ((nic->device_type == XFRAME_I_DEVICE) &&
1082 (get_xena_rev_id(nic->pdev) < 4))
1083 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1085 val64 = readq(&bar0->tx_fifo_partition_0);
1086 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1087 &bar0->tx_fifo_partition_0, (unsigned long long) val64);
1090 * Initialization of Tx_PA_CONFIG register to ignore packet
1091 * integrity checking.
1093 val64 = readq(&bar0->tx_pa_cfg);
1094 val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
1095 TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
1096 writeq(val64, &bar0->tx_pa_cfg);
1098 /* Rx DMA intialization. */
1100 for (i = 0; i < config->rx_ring_num; i++) {
1102 vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
1105 writeq(val64, &bar0->rx_queue_priority);
1108 * Allocating equal share of memory to all the
1112 if (nic->device_type & XFRAME_II_DEVICE)
1117 for (i = 0; i < config->rx_ring_num; i++) {
1120 mem_share = (mem_size / config->rx_ring_num +
1121 mem_size % config->rx_ring_num);
1122 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1125 mem_share = (mem_size / config->rx_ring_num);
1126 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1129 mem_share = (mem_size / config->rx_ring_num);
1130 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1133 mem_share = (mem_size / config->rx_ring_num);
1134 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1137 mem_share = (mem_size / config->rx_ring_num);
1138 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1141 mem_share = (mem_size / config->rx_ring_num);
1142 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1145 mem_share = (mem_size / config->rx_ring_num);
1146 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1149 mem_share = (mem_size / config->rx_ring_num);
1150 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1154 writeq(val64, &bar0->rx_queue_cfg);
1157 * Filling Tx round robin registers
1158 * as per the number of FIFOs
1160 switch (config->tx_fifo_num) {
1162 val64 = 0x0000000000000000ULL;
1163 writeq(val64, &bar0->tx_w_round_robin_0);
1164 writeq(val64, &bar0->tx_w_round_robin_1);
1165 writeq(val64, &bar0->tx_w_round_robin_2);
1166 writeq(val64, &bar0->tx_w_round_robin_3);
1167 writeq(val64, &bar0->tx_w_round_robin_4);
1170 val64 = 0x0000010000010000ULL;
1171 writeq(val64, &bar0->tx_w_round_robin_0);
1172 val64 = 0x0100000100000100ULL;
1173 writeq(val64, &bar0->tx_w_round_robin_1);
1174 val64 = 0x0001000001000001ULL;
1175 writeq(val64, &bar0->tx_w_round_robin_2);
1176 val64 = 0x0000010000010000ULL;
1177 writeq(val64, &bar0->tx_w_round_robin_3);
1178 val64 = 0x0100000000000000ULL;
1179 writeq(val64, &bar0->tx_w_round_robin_4);
1182 val64 = 0x0001000102000001ULL;
1183 writeq(val64, &bar0->tx_w_round_robin_0);
1184 val64 = 0x0001020000010001ULL;
1185 writeq(val64, &bar0->tx_w_round_robin_1);
1186 val64 = 0x0200000100010200ULL;
1187 writeq(val64, &bar0->tx_w_round_robin_2);
1188 val64 = 0x0001000102000001ULL;
1189 writeq(val64, &bar0->tx_w_round_robin_3);
1190 val64 = 0x0001020000000000ULL;
1191 writeq(val64, &bar0->tx_w_round_robin_4);
1194 val64 = 0x0001020300010200ULL;
1195 writeq(val64, &bar0->tx_w_round_robin_0);
1196 val64 = 0x0100000102030001ULL;
1197 writeq(val64, &bar0->tx_w_round_robin_1);
1198 val64 = 0x0200010000010203ULL;
1199 writeq(val64, &bar0->tx_w_round_robin_2);
1200 val64 = 0x0001020001000001ULL;
1201 writeq(val64, &bar0->tx_w_round_robin_3);
1202 val64 = 0x0203000100000000ULL;
1203 writeq(val64, &bar0->tx_w_round_robin_4);
1206 val64 = 0x0001000203000102ULL;
1207 writeq(val64, &bar0->tx_w_round_robin_0);
1208 val64 = 0x0001020001030004ULL;
1209 writeq(val64, &bar0->tx_w_round_robin_1);
1210 val64 = 0x0001000203000102ULL;
1211 writeq(val64, &bar0->tx_w_round_robin_2);
1212 val64 = 0x0001020001030004ULL;
1213 writeq(val64, &bar0->tx_w_round_robin_3);
1214 val64 = 0x0001000000000000ULL;
1215 writeq(val64, &bar0->tx_w_round_robin_4);
1218 val64 = 0x0001020304000102ULL;
1219 writeq(val64, &bar0->tx_w_round_robin_0);
1220 val64 = 0x0304050001020001ULL;
1221 writeq(val64, &bar0->tx_w_round_robin_1);
1222 val64 = 0x0203000100000102ULL;
1223 writeq(val64, &bar0->tx_w_round_robin_2);
1224 val64 = 0x0304000102030405ULL;
1225 writeq(val64, &bar0->tx_w_round_robin_3);
1226 val64 = 0x0001000200000000ULL;
1227 writeq(val64, &bar0->tx_w_round_robin_4);
1230 val64 = 0x0001020001020300ULL;
1231 writeq(val64, &bar0->tx_w_round_robin_0);
1232 val64 = 0x0102030400010203ULL;
1233 writeq(val64, &bar0->tx_w_round_robin_1);
1234 val64 = 0x0405060001020001ULL;
1235 writeq(val64, &bar0->tx_w_round_robin_2);
1236 val64 = 0x0304050000010200ULL;
1237 writeq(val64, &bar0->tx_w_round_robin_3);
1238 val64 = 0x0102030000000000ULL;
1239 writeq(val64, &bar0->tx_w_round_robin_4);
1242 val64 = 0x0001020300040105ULL;
1243 writeq(val64, &bar0->tx_w_round_robin_0);
1244 val64 = 0x0200030106000204ULL;
1245 writeq(val64, &bar0->tx_w_round_robin_1);
1246 val64 = 0x0103000502010007ULL;
1247 writeq(val64, &bar0->tx_w_round_robin_2);
1248 val64 = 0x0304010002060500ULL;
1249 writeq(val64, &bar0->tx_w_round_robin_3);
1250 val64 = 0x0103020400000000ULL;
1251 writeq(val64, &bar0->tx_w_round_robin_4);
1255 /* Enable all configured Tx FIFO partitions */
1256 val64 = readq(&bar0->tx_fifo_partition_0);
1257 val64 |= (TX_FIFO_PARTITION_EN);
1258 writeq(val64, &bar0->tx_fifo_partition_0);
1260 /* Filling the Rx round robin registers as per the
1261 * number of Rings and steering based on QoS.
1263 switch (config->rx_ring_num) {
1265 val64 = 0x8080808080808080ULL;
1266 writeq(val64, &bar0->rts_qos_steering);
1269 val64 = 0x0000010000010000ULL;
1270 writeq(val64, &bar0->rx_w_round_robin_0);
1271 val64 = 0x0100000100000100ULL;
1272 writeq(val64, &bar0->rx_w_round_robin_1);
1273 val64 = 0x0001000001000001ULL;
1274 writeq(val64, &bar0->rx_w_round_robin_2);
1275 val64 = 0x0000010000010000ULL;
1276 writeq(val64, &bar0->rx_w_round_robin_3);
1277 val64 = 0x0100000000000000ULL;
1278 writeq(val64, &bar0->rx_w_round_robin_4);
1280 val64 = 0x8080808040404040ULL;
1281 writeq(val64, &bar0->rts_qos_steering);
1284 val64 = 0x0001000102000001ULL;
1285 writeq(val64, &bar0->rx_w_round_robin_0);
1286 val64 = 0x0001020000010001ULL;
1287 writeq(val64, &bar0->rx_w_round_robin_1);
1288 val64 = 0x0200000100010200ULL;
1289 writeq(val64, &bar0->rx_w_round_robin_2);
1290 val64 = 0x0001000102000001ULL;
1291 writeq(val64, &bar0->rx_w_round_robin_3);
1292 val64 = 0x0001020000000000ULL;
1293 writeq(val64, &bar0->rx_w_round_robin_4);
1295 val64 = 0x8080804040402020ULL;
1296 writeq(val64, &bar0->rts_qos_steering);
1299 val64 = 0x0001020300010200ULL;
1300 writeq(val64, &bar0->rx_w_round_robin_0);
1301 val64 = 0x0100000102030001ULL;
1302 writeq(val64, &bar0->rx_w_round_robin_1);
1303 val64 = 0x0200010000010203ULL;
1304 writeq(val64, &bar0->rx_w_round_robin_2);
1305 val64 = 0x0001020001000001ULL;
1306 writeq(val64, &bar0->rx_w_round_robin_3);
1307 val64 = 0x0203000100000000ULL;
1308 writeq(val64, &bar0->rx_w_round_robin_4);
1310 val64 = 0x8080404020201010ULL;
1311 writeq(val64, &bar0->rts_qos_steering);
1314 val64 = 0x0001000203000102ULL;
1315 writeq(val64, &bar0->rx_w_round_robin_0);
1316 val64 = 0x0001020001030004ULL;
1317 writeq(val64, &bar0->rx_w_round_robin_1);
1318 val64 = 0x0001000203000102ULL;
1319 writeq(val64, &bar0->rx_w_round_robin_2);
1320 val64 = 0x0001020001030004ULL;
1321 writeq(val64, &bar0->rx_w_round_robin_3);
1322 val64 = 0x0001000000000000ULL;
1323 writeq(val64, &bar0->rx_w_round_robin_4);
1325 val64 = 0x8080404020201008ULL;
1326 writeq(val64, &bar0->rts_qos_steering);
1329 val64 = 0x0001020304000102ULL;
1330 writeq(val64, &bar0->rx_w_round_robin_0);
1331 val64 = 0x0304050001020001ULL;
1332 writeq(val64, &bar0->rx_w_round_robin_1);
1333 val64 = 0x0203000100000102ULL;
1334 writeq(val64, &bar0->rx_w_round_robin_2);
1335 val64 = 0x0304000102030405ULL;
1336 writeq(val64, &bar0->rx_w_round_robin_3);
1337 val64 = 0x0001000200000000ULL;
1338 writeq(val64, &bar0->rx_w_round_robin_4);
1340 val64 = 0x8080404020100804ULL;
1341 writeq(val64, &bar0->rts_qos_steering);
1344 val64 = 0x0001020001020300ULL;
1345 writeq(val64, &bar0->rx_w_round_robin_0);
1346 val64 = 0x0102030400010203ULL;
1347 writeq(val64, &bar0->rx_w_round_robin_1);
1348 val64 = 0x0405060001020001ULL;
1349 writeq(val64, &bar0->rx_w_round_robin_2);
1350 val64 = 0x0304050000010200ULL;
1351 writeq(val64, &bar0->rx_w_round_robin_3);
1352 val64 = 0x0102030000000000ULL;
1353 writeq(val64, &bar0->rx_w_round_robin_4);
1355 val64 = 0x8080402010080402ULL;
1356 writeq(val64, &bar0->rts_qos_steering);
1359 val64 = 0x0001020300040105ULL;
1360 writeq(val64, &bar0->rx_w_round_robin_0);
1361 val64 = 0x0200030106000204ULL;
1362 writeq(val64, &bar0->rx_w_round_robin_1);
1363 val64 = 0x0103000502010007ULL;
1364 writeq(val64, &bar0->rx_w_round_robin_2);
1365 val64 = 0x0304010002060500ULL;
1366 writeq(val64, &bar0->rx_w_round_robin_3);
1367 val64 = 0x0103020400000000ULL;
1368 writeq(val64, &bar0->rx_w_round_robin_4);
1370 val64 = 0x8040201008040201ULL;
1371 writeq(val64, &bar0->rts_qos_steering);
1377 for (i = 0; i < 8; i++)
1378 writeq(val64, &bar0->rts_frm_len_n[i]);
1380 /* Set the default rts frame length for the rings configured */
1381 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1382 for (i = 0 ; i < config->rx_ring_num ; i++)
1383 writeq(val64, &bar0->rts_frm_len_n[i]);
1385 /* Set the frame length for the configured rings
1386 * desired by the user
1388 for (i = 0; i < config->rx_ring_num; i++) {
1389 /* If rts_frm_len[i] == 0 then it is assumed that user not
1390 * specified frame length steering.
1391 * If the user provides the frame length then program
1392 * the rts_frm_len register for those values or else
1393 * leave it as it is.
1395 if (rts_frm_len[i] != 0) {
1396 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1397 &bar0->rts_frm_len_n[i]);
1401 /* Disable differentiated services steering logic */
1402 for (i = 0; i < 64; i++) {
1403 if (rts_ds_steer(nic, i, 0) == FAILURE) {
1404 DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
1406 DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
1411 /* Program statistics memory */
1412 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1414 if (nic->device_type == XFRAME_II_DEVICE) {
1415 val64 = STAT_BC(0x320);
1416 writeq(val64, &bar0->stat_byte_cnt);
1420 * Initializing the sampling rate for the device to calculate the
1421 * bandwidth utilization.
1423 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1424 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1425 writeq(val64, &bar0->mac_link_util);
1429 * Initializing the Transmit and Receive Traffic Interrupt
1433 * TTI Initialization. Default Tx timer gets us about
1434 * 250 interrupts per sec. Continuous interrupts are enabled
1437 if (nic->device_type == XFRAME_II_DEVICE) {
1438 int count = (nic->config.bus_speed * 125)/2;
1439 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1442 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1444 val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1445 TTI_DATA1_MEM_TX_URNG_B(0x10) |
1446 TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1447 if (use_continuous_tx_intrs)
1448 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1449 writeq(val64, &bar0->tti_data1_mem);
1451 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1452 TTI_DATA2_MEM_TX_UFC_B(0x20) |
1453 TTI_DATA2_MEM_TX_UFC_C(0x40) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1454 writeq(val64, &bar0->tti_data2_mem);
1456 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1457 writeq(val64, &bar0->tti_command_mem);
1460 * Once the operation completes, the Strobe bit of the command
1461 * register will be reset. We poll for this particular condition
1462 * We wait for a maximum of 500ms for the operation to complete,
1463 * if it's not complete by then we return error.
1467 val64 = readq(&bar0->tti_command_mem);
1468 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1472 DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1480 if (nic->config.bimodal) {
1482 for (k = 0; k < config->rx_ring_num; k++) {
1483 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1484 val64 |= TTI_CMD_MEM_OFFSET(0x38+k);
1485 writeq(val64, &bar0->tti_command_mem);
1488 * Once the operation completes, the Strobe bit of the command
1489 * register will be reset. We poll for this particular condition
1490 * We wait for a maximum of 500ms for the operation to complete,
1491 * if it's not complete by then we return error.
1495 val64 = readq(&bar0->tti_command_mem);
1496 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1501 "%s: TTI init Failed\n",
1511 /* RTI Initialization */
1512 if (nic->device_type == XFRAME_II_DEVICE) {
1514 * Programmed to generate Apprx 500 Intrs per
1517 int count = (nic->config.bus_speed * 125)/4;
1518 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1520 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1522 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1523 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1524 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1526 writeq(val64, &bar0->rti_data1_mem);
1528 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1529 RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1530 if (nic->intr_type == MSI_X)
1531 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1532 RTI_DATA2_MEM_RX_UFC_D(0x40));
1534 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1535 RTI_DATA2_MEM_RX_UFC_D(0x80));
1536 writeq(val64, &bar0->rti_data2_mem);
1538 for (i = 0; i < config->rx_ring_num; i++) {
1539 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1540 | RTI_CMD_MEM_OFFSET(i);
1541 writeq(val64, &bar0->rti_command_mem);
1544 * Once the operation completes, the Strobe bit of the
1545 * command register will be reset. We poll for this
1546 * particular condition. We wait for a maximum of 500ms
1547 * for the operation to complete, if it's not complete
1548 * by then we return error.
1552 val64 = readq(&bar0->rti_command_mem);
1553 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1557 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1568 * Initializing proper values as Pause threshold into all
1569 * the 8 Queues on Rx side.
1571 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1572 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1574 /* Disable RMAC PAD STRIPPING */
1575 add = &bar0->mac_cfg;
1576 val64 = readq(&bar0->mac_cfg);
1577 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1578 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1579 writel((u32) (val64), add);
1580 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1581 writel((u32) (val64 >> 32), (add + 4));
1582 val64 = readq(&bar0->mac_cfg);
1584 /* Enable FCS stripping by adapter */
1585 add = &bar0->mac_cfg;
1586 val64 = readq(&bar0->mac_cfg);
1587 val64 |= MAC_CFG_RMAC_STRIP_FCS;
1588 if (nic->device_type == XFRAME_II_DEVICE)
1589 writeq(val64, &bar0->mac_cfg);
1591 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1592 writel((u32) (val64), add);
1593 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1594 writel((u32) (val64 >> 32), (add + 4));
1598 * Set the time value to be inserted in the pause frame
1599 * generated by xena.
1601 val64 = readq(&bar0->rmac_pause_cfg);
1602 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1603 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1604 writeq(val64, &bar0->rmac_pause_cfg);
1607 * Set the Threshold Limit for Generating the pause frame
1608 * If the amount of data in any Queue exceeds ratio of
1609 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1610 * pause frame is generated
1613 for (i = 0; i < 4; i++) {
1615 (((u64) 0xFF00 | nic->mac_control.
1616 mc_pause_threshold_q0q3)
1619 writeq(val64, &bar0->mc_pause_thresh_q0q3);
1622 for (i = 0; i < 4; i++) {
1624 (((u64) 0xFF00 | nic->mac_control.
1625 mc_pause_threshold_q4q7)
1628 writeq(val64, &bar0->mc_pause_thresh_q4q7);
1631 * TxDMA will stop Read request if the number of read split has
1632 * exceeded the limit pointed by shared_splits
1634 val64 = readq(&bar0->pic_control);
1635 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1636 writeq(val64, &bar0->pic_control);
1638 if (nic->config.bus_speed == 266) {
1639 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1640 writeq(0x0, &bar0->read_retry_delay);
1641 writeq(0x0, &bar0->write_retry_delay);
1645 * Programming the Herc to split every write transaction
1646 * that does not start on an ADB to reduce disconnects.
1648 if (nic->device_type == XFRAME_II_DEVICE) {
1649 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1650 MISC_LINK_STABILITY_PRD(3);
1651 writeq(val64, &bar0->misc_control);
1652 val64 = readq(&bar0->pic_control2);
1653 val64 &= ~(BIT(13)|BIT(14)|BIT(15));
1654 writeq(val64, &bar0->pic_control2);
1656 if (strstr(nic->product_name, "CX4")) {
1657 val64 = TMAC_AVG_IPG(0x17);
1658 writeq(val64, &bar0->tmac_avg_ipg);
1663 #define LINK_UP_DOWN_INTERRUPT 1
1664 #define MAC_RMAC_ERR_TIMER 2
1666 static int s2io_link_fault_indication(struct s2io_nic *nic)
1668 if (nic->intr_type != INTA)
1669 return MAC_RMAC_ERR_TIMER;
1670 if (nic->device_type == XFRAME_II_DEVICE)
1671 return LINK_UP_DOWN_INTERRUPT;
1673 return MAC_RMAC_ERR_TIMER;
1677 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1678 * @nic: device private variable,
1679 * @mask: A mask indicating which Intr block must be modified and,
1680 * @flag: A flag indicating whether to enable or disable the Intrs.
1681 * Description: This function will either disable or enable the interrupts
1682 * depending on the flag argument. The mask argument can be used to
1683 * enable/disable any Intr block.
1684 * Return Value: NONE.
1687 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1689 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1690 register u64 val64 = 0, temp64 = 0;
1692 /* Top level interrupt classification */
1693 /* PIC Interrupts */
1694 if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
1695 /* Enable PIC Intrs in the general intr mask register */
1696 val64 = TXPIC_INT_M;
1697 if (flag == ENABLE_INTRS) {
1698 temp64 = readq(&bar0->general_int_mask);
1699 temp64 &= ~((u64) val64);
1700 writeq(temp64, &bar0->general_int_mask);
1702 * If Hercules adapter enable GPIO otherwise
1703 * disable all PCIX, Flash, MDIO, IIC and GPIO
1704 * interrupts for now.
1707 if (s2io_link_fault_indication(nic) ==
1708 LINK_UP_DOWN_INTERRUPT ) {
1709 temp64 = readq(&bar0->pic_int_mask);
1710 temp64 &= ~((u64) PIC_INT_GPIO);
1711 writeq(temp64, &bar0->pic_int_mask);
1712 temp64 = readq(&bar0->gpio_int_mask);
1713 temp64 &= ~((u64) GPIO_INT_MASK_LINK_UP);
1714 writeq(temp64, &bar0->gpio_int_mask);
1716 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1719 * No MSI Support is available presently, so TTI and
1720 * RTI interrupts are also disabled.
1722 } else if (flag == DISABLE_INTRS) {
1724 * Disable PIC Intrs in the general
1725 * intr mask register
1727 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1728 temp64 = readq(&bar0->general_int_mask);
1730 writeq(val64, &bar0->general_int_mask);
1734 /* MAC Interrupts */
1735 /* Enabling/Disabling MAC interrupts */
1736 if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
1737 val64 = TXMAC_INT_M | RXMAC_INT_M;
1738 if (flag == ENABLE_INTRS) {
1739 temp64 = readq(&bar0->general_int_mask);
1740 temp64 &= ~((u64) val64);
1741 writeq(temp64, &bar0->general_int_mask);
1743 * All MAC block error interrupts are disabled for now
1746 } else if (flag == DISABLE_INTRS) {
1748 * Disable MAC Intrs in the general intr mask register
1750 writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
1751 writeq(DISABLE_ALL_INTRS,
1752 &bar0->mac_rmac_err_mask);
1754 temp64 = readq(&bar0->general_int_mask);
1756 writeq(val64, &bar0->general_int_mask);
1760 /* Tx traffic interrupts */
1761 if (mask & TX_TRAFFIC_INTR) {
1762 val64 = TXTRAFFIC_INT_M;
1763 if (flag == ENABLE_INTRS) {
1764 temp64 = readq(&bar0->general_int_mask);
1765 temp64 &= ~((u64) val64);
1766 writeq(temp64, &bar0->general_int_mask);
1768 * Enable all the Tx side interrupts
1769 * writing 0 Enables all 64 TX interrupt levels
1771 writeq(0x0, &bar0->tx_traffic_mask);
1772 } else if (flag == DISABLE_INTRS) {
1774 * Disable Tx Traffic Intrs in the general intr mask
1777 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1778 temp64 = readq(&bar0->general_int_mask);
1780 writeq(val64, &bar0->general_int_mask);
1784 /* Rx traffic interrupts */
1785 if (mask & RX_TRAFFIC_INTR) {
1786 val64 = RXTRAFFIC_INT_M;
1787 if (flag == ENABLE_INTRS) {
1788 temp64 = readq(&bar0->general_int_mask);
1789 temp64 &= ~((u64) val64);
1790 writeq(temp64, &bar0->general_int_mask);
1791 /* writing 0 Enables all 8 RX interrupt levels */
1792 writeq(0x0, &bar0->rx_traffic_mask);
1793 } else if (flag == DISABLE_INTRS) {
1795 * Disable Rx Traffic Intrs in the general intr mask
1798 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1799 temp64 = readq(&bar0->general_int_mask);
1801 writeq(val64, &bar0->general_int_mask);
1807 * verify_pcc_quiescent- Checks for PCC quiescent state
1808 * Return: 1 If PCC is quiescence
1809 * 0 If PCC is not quiescence
1811 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
1814 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1815 u64 val64 = readq(&bar0->adapter_status);
1817 herc = (sp->device_type == XFRAME_II_DEVICE);
1819 if (flag == FALSE) {
1820 if ((!herc && (get_xena_rev_id(sp->pdev) >= 4)) || herc) {
1821 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
1824 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
1828 if ((!herc && (get_xena_rev_id(sp->pdev) >= 4)) || herc) {
1829 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1830 ADAPTER_STATUS_RMAC_PCC_IDLE))
1833 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1834 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
1842 * verify_xena_quiescence - Checks whether the H/W is ready
1843 * Description: Returns whether the H/W is ready to go or not. Depending
1844 * on whether adapter enable bit was written or not the comparison
1845 * differs and the calling function passes the input argument flag to
1847 * Return: 1 If xena is quiescence
1848 * 0 If Xena is not quiescence
1851 static int verify_xena_quiescence(struct s2io_nic *sp)
1854 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1855 u64 val64 = readq(&bar0->adapter_status);
1856 mode = s2io_verify_pci_mode(sp);
1858 if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
1859 DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
1862 if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
1863 DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
1866 if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
1867 DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
1870 if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
1871 DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
1874 if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
1875 DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
1878 if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
1879 DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
1882 if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
1883 DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
1886 if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
1887 DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
1892 * In PCI 33 mode, the P_PLL is not used, and therefore,
1893 * the the P_PLL_LOCK bit in the adapter_status register will
1896 if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
1897 sp->device_type == XFRAME_II_DEVICE && mode !=
1899 DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
1902 if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1903 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1904 DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
1911 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
1912 * @sp: Pointer to device specifc structure
1914 * New procedure to clear mac address reading problems on Alpha platforms
1918 static void fix_mac_address(struct s2io_nic * sp)
1920 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1924 while (fix_mac[i] != END_SIGN) {
1925 writeq(fix_mac[i++], &bar0->gpio_control);
1927 val64 = readq(&bar0->gpio_control);
1932 * start_nic - Turns the device on
1933 * @nic : device private variable.
1935 * This function actually turns the device on. Before this function is
1936 * called,all Registers are configured from their reset states
1937 * and shared memory is allocated but the NIC is still quiescent. On
1938 * calling this function, the device interrupts are cleared and the NIC is
1939 * literally switched on by writing into the adapter control register.
1941 * SUCCESS on success and -1 on failure.
1944 static int start_nic(struct s2io_nic *nic)
1946 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1947 struct net_device *dev = nic->dev;
1948 register u64 val64 = 0;
1950 struct mac_info *mac_control;
1951 struct config_param *config;
1953 mac_control = &nic->mac_control;
1954 config = &nic->config;
1956 /* PRC Initialization and configuration */
1957 for (i = 0; i < config->rx_ring_num; i++) {
1958 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
1959 &bar0->prc_rxd0_n[i]);
1961 val64 = readq(&bar0->prc_ctrl_n[i]);
1962 if (nic->config.bimodal)
1963 val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
1964 if (nic->rxd_mode == RXD_MODE_1)
1965 val64 |= PRC_CTRL_RC_ENABLED;
1967 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
1968 if (nic->device_type == XFRAME_II_DEVICE)
1969 val64 |= PRC_CTRL_GROUP_READS;
1970 val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
1971 val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
1972 writeq(val64, &bar0->prc_ctrl_n[i]);
1975 if (nic->rxd_mode == RXD_MODE_3B) {
1976 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
1977 val64 = readq(&bar0->rx_pa_cfg);
1978 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
1979 writeq(val64, &bar0->rx_pa_cfg);
1982 if (vlan_tag_strip == 0) {
1983 val64 = readq(&bar0->rx_pa_cfg);
1984 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
1985 writeq(val64, &bar0->rx_pa_cfg);
1986 vlan_strip_flag = 0;
1990 * Enabling MC-RLDRAM. After enabling the device, we timeout
1991 * for around 100ms, which is approximately the time required
1992 * for the device to be ready for operation.
1994 val64 = readq(&bar0->mc_rldram_mrs);
1995 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
1996 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
1997 val64 = readq(&bar0->mc_rldram_mrs);
1999 msleep(100); /* Delay by around 100 ms. */
2001 /* Enabling ECC Protection. */
2002 val64 = readq(&bar0->adapter_control);
2003 val64 &= ~ADAPTER_ECC_EN;
2004 writeq(val64, &bar0->adapter_control);
2007 * Clearing any possible Link state change interrupts that
2008 * could have popped up just before Enabling the card.
2010 val64 = readq(&bar0->mac_rmac_err_reg);
2012 writeq(val64, &bar0->mac_rmac_err_reg);
2015 * Verify if the device is ready to be enabled, if so enable
2018 val64 = readq(&bar0->adapter_status);
2019 if (!verify_xena_quiescence(nic)) {
2020 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
2021 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
2022 (unsigned long long) val64);
2027 * With some switches, link might be already up at this point.
2028 * Because of this weird behavior, when we enable laser,
2029 * we may not get link. We need to handle this. We cannot
2030 * figure out which switch is misbehaving. So we are forced to
2031 * make a global change.
2034 /* Enabling Laser. */
2035 val64 = readq(&bar0->adapter_control);
2036 val64 |= ADAPTER_EOI_TX_ON;
2037 writeq(val64, &bar0->adapter_control);
2039 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2041 * Dont see link state interrupts initally on some switches,
2042 * so directly scheduling the link state task here.
2044 schedule_work(&nic->set_link_task);
2046 /* SXE-002: Initialize link and activity LED */
2047 subid = nic->pdev->subsystem_device;
2048 if (((subid & 0xFF) >= 0x07) &&
2049 (nic->device_type == XFRAME_I_DEVICE)) {
2050 val64 = readq(&bar0->gpio_control);
2051 val64 |= 0x0000800000000000ULL;
2052 writeq(val64, &bar0->gpio_control);
2053 val64 = 0x0411040400000000ULL;
2054 writeq(val64, (void __iomem *)bar0 + 0x2700);
2060 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2062 static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, struct \
2063 TxD *txdlp, int get_off)
2065 struct s2io_nic *nic = fifo_data->nic;
2066 struct sk_buff *skb;
2071 if (txds->Host_Control == (u64)(long)nic->ufo_in_band_v) {
2072 pci_unmap_single(nic->pdev, (dma_addr_t)
2073 txds->Buffer_Pointer, sizeof(u64),
2078 skb = (struct sk_buff *) ((unsigned long)
2079 txds->Host_Control);
2081 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2084 pci_unmap_single(nic->pdev, (dma_addr_t)
2085 txds->Buffer_Pointer,
2086 skb->len - skb->data_len,
2088 frg_cnt = skb_shinfo(skb)->nr_frags;
2091 for (j = 0; j < frg_cnt; j++, txds++) {
2092 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2093 if (!txds->Buffer_Pointer)
2095 pci_unmap_page(nic->pdev, (dma_addr_t)
2096 txds->Buffer_Pointer,
2097 frag->size, PCI_DMA_TODEVICE);
2100 memset(txdlp,0, (sizeof(struct TxD) * fifo_data->max_txds));
2105 * free_tx_buffers - Free all queued Tx buffers
2106 * @nic : device private variable.
2108 * Free all queued Tx buffers.
2109 * Return Value: void
2112 static void free_tx_buffers(struct s2io_nic *nic)
2114 struct net_device *dev = nic->dev;
2115 struct sk_buff *skb;
2118 struct mac_info *mac_control;
2119 struct config_param *config;
2122 mac_control = &nic->mac_control;
2123 config = &nic->config;
2125 for (i = 0; i < config->tx_fifo_num; i++) {
2126 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
2127 txdp = (struct TxD *) mac_control->fifos[i].list_info[j].
2129 skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2136 "%s:forcibly freeing %d skbs on FIFO%d\n",
2138 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2139 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2144 * stop_nic - To stop the nic
2145 * @nic ; device private variable.
2147 * This function does exactly the opposite of what the start_nic()
2148 * function does. This function is called to stop the device.
2153 static void stop_nic(struct s2io_nic *nic)
2155 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2156 register u64 val64 = 0;
2158 struct mac_info *mac_control;
2159 struct config_param *config;
2161 mac_control = &nic->mac_control;
2162 config = &nic->config;
2164 /* Disable all interrupts */
2165 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2166 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
2167 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
2168 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2170 /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2171 val64 = readq(&bar0->adapter_control);
2172 val64 &= ~(ADAPTER_CNTL_EN);
2173 writeq(val64, &bar0->adapter_control);
2176 static int fill_rxd_3buf(struct s2io_nic *nic, struct RxD_t *rxdp, struct \
2179 struct net_device *dev = nic->dev;
2180 struct sk_buff *frag_list;
2183 /* Buffer-1 receives L3/L4 headers */
2184 ((struct RxD3*)rxdp)->Buffer1_ptr = pci_map_single
2185 (nic->pdev, skb->data, l3l4hdr_size + 4,
2186 PCI_DMA_FROMDEVICE);
2188 /* skb_shinfo(skb)->frag_list will have L4 data payload */
2189 skb_shinfo(skb)->frag_list = dev_alloc_skb(dev->mtu + ALIGN_SIZE);
2190 if (skb_shinfo(skb)->frag_list == NULL) {
2191 DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n ", dev->name);
2194 frag_list = skb_shinfo(skb)->frag_list;
2195 skb->truesize += frag_list->truesize;
2196 frag_list->next = NULL;
2197 tmp = (void *)ALIGN((long)frag_list->data, ALIGN_SIZE + 1);
2198 frag_list->data = tmp;
2199 frag_list->tail = tmp;
2201 /* Buffer-2 receives L4 data payload */
2202 ((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev,
2203 frag_list->data, dev->mtu,
2204 PCI_DMA_FROMDEVICE);
2205 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
2206 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);
2212 * fill_rx_buffers - Allocates the Rx side skbs
2213 * @nic: device private variable
2214 * @ring_no: ring number
2216 * The function allocates Rx side skbs and puts the physical
2217 * address of these buffers into the RxD buffer pointers, so that the NIC
2218 * can DMA the received frame into these locations.
2219 * The NIC supports 3 receive modes, viz
2221 * 2. three buffer and
2222 * 3. Five buffer modes.
2223 * Each mode defines how many fragments the received frame will be split
2224 * up into by the NIC. The frame is split into L3 header, L4 Header,
2225 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2226 * is split into 3 fragments. As of now only single buffer mode is
2229 * SUCCESS on success or an appropriate -ve value on failure.
2232 static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2234 struct net_device *dev = nic->dev;
2235 struct sk_buff *skb;
2237 int off, off1, size, block_no, block_no1;
2240 struct mac_info *mac_control;
2241 struct config_param *config;
2244 unsigned long flags;
2245 struct RxD_t *first_rxdp = NULL;
2247 mac_control = &nic->mac_control;
2248 config = &nic->config;
2249 alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
2250 atomic_read(&nic->rx_bufs_left[ring_no]);
2252 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
2253 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
2254 while (alloc_tab < alloc_cnt) {
2255 block_no = mac_control->rings[ring_no].rx_curr_put_info.
2257 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
2259 rxdp = mac_control->rings[ring_no].
2260 rx_blocks[block_no].rxds[off].virt_addr;
2262 if ((block_no == block_no1) && (off == off1) &&
2263 (rxdp->Host_Control)) {
2264 DBG_PRINT(INTR_DBG, "%s: Get and Put",
2266 DBG_PRINT(INTR_DBG, " info equated\n");
2269 if (off && (off == rxd_count[nic->rxd_mode])) {
2270 mac_control->rings[ring_no].rx_curr_put_info.
2272 if (mac_control->rings[ring_no].rx_curr_put_info.
2273 block_index == mac_control->rings[ring_no].
2275 mac_control->rings[ring_no].rx_curr_put_info.
2277 block_no = mac_control->rings[ring_no].
2278 rx_curr_put_info.block_index;
2279 if (off == rxd_count[nic->rxd_mode])
2281 mac_control->rings[ring_no].rx_curr_put_info.
2283 rxdp = mac_control->rings[ring_no].
2284 rx_blocks[block_no].block_virt_addr;
2285 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2289 spin_lock_irqsave(&nic->put_lock, flags);
2290 mac_control->rings[ring_no].put_pos =
2291 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2292 spin_unlock_irqrestore(&nic->put_lock, flags);
2294 mac_control->rings[ring_no].put_pos =
2295 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2297 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2298 ((nic->rxd_mode >= RXD_MODE_3A) &&
2299 (rxdp->Control_2 & BIT(0)))) {
2300 mac_control->rings[ring_no].rx_curr_put_info.
2304 /* calculate size of skb based on ring mode */
2305 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2306 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2307 if (nic->rxd_mode == RXD_MODE_1)
2308 size += NET_IP_ALIGN;
2309 else if (nic->rxd_mode == RXD_MODE_3B)
2310 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2312 size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;
2315 skb = dev_alloc_skb(size);
2317 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
2318 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
2321 first_rxdp->Control_1 |= RXD_OWN_XENA;
2325 if (nic->rxd_mode == RXD_MODE_1) {
2326 /* 1 buffer mode - normal operation mode */
2327 memset(rxdp, 0, sizeof(struct RxD1));
2328 skb_reserve(skb, NET_IP_ALIGN);
2329 ((struct RxD1*)rxdp)->Buffer0_ptr = pci_map_single
2330 (nic->pdev, skb->data, size - NET_IP_ALIGN,
2331 PCI_DMA_FROMDEVICE);
2332 rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
2334 } else if (nic->rxd_mode >= RXD_MODE_3A) {
2336 * 2 or 3 buffer mode -
2337 * Both 2 buffer mode and 3 buffer mode provides 128
2338 * byte aligned receive buffers.
2340 * 3 buffer mode provides header separation where in
2341 * skb->data will have L3/L4 headers where as
2342 * skb_shinfo(skb)->frag_list will have the L4 data
2346 memset(rxdp, 0, sizeof(struct RxD3));
2347 ba = &mac_control->rings[ring_no].ba[block_no][off];
2348 skb_reserve(skb, BUF0_LEN);
2349 tmp = (u64)(unsigned long) skb->data;
2352 skb->data = (void *) (unsigned long)tmp;
2353 skb->tail = (void *) (unsigned long)tmp;
2355 if (!(((struct RxD3*)rxdp)->Buffer0_ptr))
2356 ((struct RxD3*)rxdp)->Buffer0_ptr =
2357 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2358 PCI_DMA_FROMDEVICE);
2360 pci_dma_sync_single_for_device(nic->pdev,
2361 (dma_addr_t) ((struct RxD3*)rxdp)->Buffer0_ptr,
2362 BUF0_LEN, PCI_DMA_FROMDEVICE);
2363 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2364 if (nic->rxd_mode == RXD_MODE_3B) {
2365 /* Two buffer mode */
2368 * Buffer2 will have L3/L4 header plus
2371 ((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single
2372 (nic->pdev, skb->data, dev->mtu + 4,
2373 PCI_DMA_FROMDEVICE);
2375 /* Buffer-1 will be dummy buffer. Not used */
2376 if (!(((struct RxD3*)rxdp)->Buffer1_ptr)) {
2377 ((struct RxD3*)rxdp)->Buffer1_ptr =
2378 pci_map_single(nic->pdev,
2380 PCI_DMA_FROMDEVICE);
2382 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2383 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2387 if (fill_rxd_3buf(nic, rxdp, skb) == -ENOMEM) {
2388 dev_kfree_skb_irq(skb);
2391 first_rxdp->Control_1 |=
2397 rxdp->Control_2 |= BIT(0);
2399 rxdp->Host_Control = (unsigned long) (skb);
2400 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2401 rxdp->Control_1 |= RXD_OWN_XENA;
2403 if (off == (rxd_count[nic->rxd_mode] + 1))
2405 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
2407 rxdp->Control_2 |= SET_RXD_MARKER;
2408 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2411 first_rxdp->Control_1 |= RXD_OWN_XENA;
2415 atomic_inc(&nic->rx_bufs_left[ring_no]);
2420 /* Transfer ownership of first descriptor to adapter just before
2421 * exiting. Before that, use memory barrier so that ownership
2422 * and other fields are seen by adapter correctly.
2426 first_rxdp->Control_1 |= RXD_OWN_XENA;
2432 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2434 struct net_device *dev = sp->dev;
2436 struct sk_buff *skb;
2438 struct mac_info *mac_control;
2441 mac_control = &sp->mac_control;
2442 for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2443 rxdp = mac_control->rings[ring_no].
2444 rx_blocks[blk].rxds[j].virt_addr;
2445 skb = (struct sk_buff *)
2446 ((unsigned long) rxdp->Host_Control);
2450 if (sp->rxd_mode == RXD_MODE_1) {
2451 pci_unmap_single(sp->pdev, (dma_addr_t)
2452 ((struct RxD1*)rxdp)->Buffer0_ptr,
2454 HEADER_ETHERNET_II_802_3_SIZE
2455 + HEADER_802_2_SIZE +
2457 PCI_DMA_FROMDEVICE);
2458 memset(rxdp, 0, sizeof(struct RxD1));
2459 } else if(sp->rxd_mode == RXD_MODE_3B) {
2460 ba = &mac_control->rings[ring_no].
2462 pci_unmap_single(sp->pdev, (dma_addr_t)
2463 ((struct RxD3*)rxdp)->Buffer0_ptr,
2465 PCI_DMA_FROMDEVICE);
2466 pci_unmap_single(sp->pdev, (dma_addr_t)
2467 ((struct RxD3*)rxdp)->Buffer1_ptr,
2469 PCI_DMA_FROMDEVICE);
2470 pci_unmap_single(sp->pdev, (dma_addr_t)
2471 ((struct RxD3*)rxdp)->Buffer2_ptr,
2473 PCI_DMA_FROMDEVICE);
2474 memset(rxdp, 0, sizeof(struct RxD3));
2476 pci_unmap_single(sp->pdev, (dma_addr_t)
2477 ((struct RxD3*)rxdp)->Buffer0_ptr, BUF0_LEN,
2478 PCI_DMA_FROMDEVICE);
2479 pci_unmap_single(sp->pdev, (dma_addr_t)
2480 ((struct RxD3*)rxdp)->Buffer1_ptr,
2482 PCI_DMA_FROMDEVICE);
2483 pci_unmap_single(sp->pdev, (dma_addr_t)
2484 ((struct RxD3*)rxdp)->Buffer2_ptr, dev->mtu,
2485 PCI_DMA_FROMDEVICE);
2486 memset(rxdp, 0, sizeof(struct RxD3));
2489 atomic_dec(&sp->rx_bufs_left[ring_no]);
2494 * free_rx_buffers - Frees all Rx buffers
2495 * @sp: device private variable.
2497 * This function will free all Rx buffers allocated by host.
2502 static void free_rx_buffers(struct s2io_nic *sp)
2504 struct net_device *dev = sp->dev;
2505 int i, blk = 0, buf_cnt = 0;
2506 struct mac_info *mac_control;
2507 struct config_param *config;
2509 mac_control = &sp->mac_control;
2510 config = &sp->config;
2512 for (i = 0; i < config->rx_ring_num; i++) {
2513 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2514 free_rxd_blk(sp,i,blk);
2516 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2517 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2518 mac_control->rings[i].rx_curr_put_info.offset = 0;
2519 mac_control->rings[i].rx_curr_get_info.offset = 0;
2520 atomic_set(&sp->rx_bufs_left[i], 0);
2521 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2522 dev->name, buf_cnt, i);
2527 * s2io_poll - Rx interrupt handler for NAPI support
2528 * @dev : pointer to the device structure.
2529 * @budget : The number of packets that were budgeted to be processed
2530 * during one pass through the 'Poll" function.
2532 * Comes into picture only if NAPI support has been incorporated. It does
2533 * the same thing that rx_intr_handler does, but not in a interrupt context
2534 * also It will process only a given number of packets.
2536 * 0 on success and 1 if there are No Rx packets to be processed.
2539 static int s2io_poll(struct net_device *dev, int *budget)
2541 struct s2io_nic *nic = dev->priv;
2542 int pkt_cnt = 0, org_pkts_to_process;
2543 struct mac_info *mac_control;
2544 struct config_param *config;
2545 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2548 atomic_inc(&nic->isr_cnt);
2549 mac_control = &nic->mac_control;
2550 config = &nic->config;
2552 nic->pkts_to_process = *budget;
2553 if (nic->pkts_to_process > dev->quota)
2554 nic->pkts_to_process = dev->quota;
2555 org_pkts_to_process = nic->pkts_to_process;
2557 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
2558 readl(&bar0->rx_traffic_int);
2560 for (i = 0; i < config->rx_ring_num; i++) {
2561 rx_intr_handler(&mac_control->rings[i]);
2562 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2563 if (!nic->pkts_to_process) {
2564 /* Quota for the current iteration has been met */
2571 dev->quota -= pkt_cnt;
2573 netif_rx_complete(dev);
2575 for (i = 0; i < config->rx_ring_num; i++) {
2576 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2577 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2578 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2582 /* Re enable the Rx interrupts. */
2583 writeq(0x0, &bar0->rx_traffic_mask);
2584 readl(&bar0->rx_traffic_mask);
2585 atomic_dec(&nic->isr_cnt);
2589 dev->quota -= pkt_cnt;
2592 for (i = 0; i < config->rx_ring_num; i++) {
2593 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2594 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2595 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2599 atomic_dec(&nic->isr_cnt);
2603 #ifdef CONFIG_NET_POLL_CONTROLLER
2605 * s2io_netpoll - netpoll event handler entry point
2606 * @dev : pointer to the device structure.
2608 * This function will be called by upper layer to check for events on the
2609 * interface in situations where interrupts are disabled. It is used for
2610 * specific in-kernel networking tasks, such as remote consoles and kernel
2611 * debugging over the network (example netdump in RedHat).
2613 static void s2io_netpoll(struct net_device *dev)
2615 struct s2io_nic *nic = dev->priv;
2616 struct mac_info *mac_control;
2617 struct config_param *config;
2618 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2619 u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2622 disable_irq(dev->irq);
2624 atomic_inc(&nic->isr_cnt);
2625 mac_control = &nic->mac_control;
2626 config = &nic->config;
2628 writeq(val64, &bar0->rx_traffic_int);
2629 writeq(val64, &bar0->tx_traffic_int);
2631 /* we need to free up the transmitted skbufs or else netpoll will
2632 * run out of skbs and will fail and eventually netpoll application such
2633 * as netdump will fail.
2635 for (i = 0; i < config->tx_fifo_num; i++)
2636 tx_intr_handler(&mac_control->fifos[i]);
2638 /* check for received packet and indicate up to network */
2639 for (i = 0; i < config->rx_ring_num; i++)
2640 rx_intr_handler(&mac_control->rings[i]);
2642 for (i = 0; i < config->rx_ring_num; i++) {
2643 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2644 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2645 DBG_PRINT(ERR_DBG, " in Rx Netpoll!!\n");
2649 atomic_dec(&nic->isr_cnt);
2650 enable_irq(dev->irq);
2656 * rx_intr_handler - Rx interrupt handler
2657 * @nic: device private variable.
2659 * If the interrupt is because of a received frame or if the
2660 * receive ring contains fresh as yet un-processed frames,this function is
2661 * called. It picks out the RxD at which place the last Rx processing had
2662 * stopped and sends the skb to the OSM's Rx handler and then increments
2667 static void rx_intr_handler(struct ring_info *ring_data)
2669 struct s2io_nic *nic = ring_data->nic;
2670 struct net_device *dev = (struct net_device *) nic->dev;
2671 int get_block, put_block, put_offset;
2672 struct rx_curr_get_info get_info, put_info;
2674 struct sk_buff *skb;
2678 spin_lock(&nic->rx_lock);
2679 if (atomic_read(&nic->card_state) == CARD_DOWN) {
2680 DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n",
2681 __FUNCTION__, dev->name);
2682 spin_unlock(&nic->rx_lock);
2686 get_info = ring_data->rx_curr_get_info;
2687 get_block = get_info.block_index;
2688 memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
2689 put_block = put_info.block_index;
2690 rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
2692 spin_lock(&nic->put_lock);
2693 put_offset = ring_data->put_pos;
2694 spin_unlock(&nic->put_lock);
2696 put_offset = ring_data->put_pos;
2698 while (RXD_IS_UP2DT(rxdp)) {
2700 * If your are next to put index then it's
2701 * FIFO full condition
2703 if ((get_block == put_block) &&
2704 (get_info.offset + 1) == put_info.offset) {
2705 DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name);
2708 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2710 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2712 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2713 spin_unlock(&nic->rx_lock);
2716 if (nic->rxd_mode == RXD_MODE_1) {
2717 pci_unmap_single(nic->pdev, (dma_addr_t)
2718 ((struct RxD1*)rxdp)->Buffer0_ptr,
2720 HEADER_ETHERNET_II_802_3_SIZE +
2723 PCI_DMA_FROMDEVICE);
2724 } else if (nic->rxd_mode == RXD_MODE_3B) {
2725 pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
2726 ((struct RxD3*)rxdp)->Buffer0_ptr,
2727 BUF0_LEN, PCI_DMA_FROMDEVICE);
2728 pci_unmap_single(nic->pdev, (dma_addr_t)
2729 ((struct RxD3*)rxdp)->Buffer2_ptr,
2731 PCI_DMA_FROMDEVICE);
2733 pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
2734 ((struct RxD3*)rxdp)->Buffer0_ptr, BUF0_LEN,
2735 PCI_DMA_FROMDEVICE);
2736 pci_unmap_single(nic->pdev, (dma_addr_t)
2737 ((struct RxD3*)rxdp)->Buffer1_ptr,
2739 PCI_DMA_FROMDEVICE);
2740 pci_unmap_single(nic->pdev, (dma_addr_t)
2741 ((struct RxD3*)rxdp)->Buffer2_ptr,
2742 dev->mtu, PCI_DMA_FROMDEVICE);
2744 prefetch(skb->data);
2745 rx_osm_handler(ring_data, rxdp);
2747 ring_data->rx_curr_get_info.offset = get_info.offset;
2748 rxdp = ring_data->rx_blocks[get_block].
2749 rxds[get_info.offset].virt_addr;
2750 if (get_info.offset == rxd_count[nic->rxd_mode]) {
2751 get_info.offset = 0;
2752 ring_data->rx_curr_get_info.offset = get_info.offset;
2754 if (get_block == ring_data->block_count)
2756 ring_data->rx_curr_get_info.block_index = get_block;
2757 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2760 nic->pkts_to_process -= 1;
2761 if ((napi) && (!nic->pkts_to_process))
2764 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2768 /* Clear all LRO sessions before exiting */
2769 for (i=0; i<MAX_LRO_SESSIONS; i++) {
2770 struct lro *lro = &nic->lro0_n[i];
2772 update_L3L4_header(nic, lro);
2773 queue_rx_frame(lro->parent);
2774 clear_lro_session(lro);
2779 spin_unlock(&nic->rx_lock);
2783 * tx_intr_handler - Transmit interrupt handler
2784 * @nic : device private variable
2786 * If an interrupt was raised to indicate DMA complete of the
2787 * Tx packet, this function is called. It identifies the last TxD
2788 * whose buffer was freed and frees all skbs whose data have already
2789 * DMA'ed into the NICs internal memory.
2794 static void tx_intr_handler(struct fifo_info *fifo_data)
2796 struct s2io_nic *nic = fifo_data->nic;
2797 struct net_device *dev = (struct net_device *) nic->dev;
2798 struct tx_curr_get_info get_info, put_info;
2799 struct sk_buff *skb;
2802 get_info = fifo_data->tx_curr_get_info;
2803 memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
2804 txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
2806 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2807 (get_info.offset != put_info.offset) &&
2808 (txdlp->Host_Control)) {
2809 /* Check for TxD errors */
2810 if (txdlp->Control_1 & TXD_T_CODE) {
2811 unsigned long long err;
2812 err = txdlp->Control_1 & TXD_T_CODE;
2814 nic->mac_control.stats_info->sw_stat.
2817 if ((err >> 48) == 0xA) {
2818 DBG_PRINT(TX_DBG, "TxD returned due \
2819 to loss of link\n");
2822 DBG_PRINT(ERR_DBG, "***TxD error %llx\n", err);
2826 skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
2828 DBG_PRINT(ERR_DBG, "%s: Null skb ",
2830 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2834 /* Updating the statistics block */
2835 nic->stats.tx_bytes += skb->len;
2836 dev_kfree_skb_irq(skb);
2839 if (get_info.offset == get_info.fifo_len + 1)
2840 get_info.offset = 0;
2841 txdlp = (struct TxD *) fifo_data->list_info
2842 [get_info.offset].list_virt_addr;
2843 fifo_data->tx_curr_get_info.offset =
2847 spin_lock(&nic->tx_lock);
2848 if (netif_queue_stopped(dev))
2849 netif_wake_queue(dev);
2850 spin_unlock(&nic->tx_lock);
2854 * s2io_mdio_write - Function to write in to MDIO registers
2855 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
2856 * @addr : address value
2857 * @value : data value
2858 * @dev : pointer to net_device structure
2860 * This function is used to write values to the MDIO registers
2863 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
2866 struct s2io_nic *sp = dev->priv;
2867 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2869 //address transaction
2870 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2871 | MDIO_MMD_DEV_ADDR(mmd_type)
2872 | MDIO_MMS_PRT_ADDR(0x0);
2873 writeq(val64, &bar0->mdio_control);
2874 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2875 writeq(val64, &bar0->mdio_control);
2880 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2881 | MDIO_MMD_DEV_ADDR(mmd_type)
2882 | MDIO_MMS_PRT_ADDR(0x0)
2883 | MDIO_MDIO_DATA(value)
2884 | MDIO_OP(MDIO_OP_WRITE_TRANS);
2885 writeq(val64, &bar0->mdio_control);
2886 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2887 writeq(val64, &bar0->mdio_control);
2891 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2892 | MDIO_MMD_DEV_ADDR(mmd_type)
2893 | MDIO_MMS_PRT_ADDR(0x0)
2894 | MDIO_OP(MDIO_OP_READ_TRANS);
2895 writeq(val64, &bar0->mdio_control);
2896 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2897 writeq(val64, &bar0->mdio_control);
2903 * s2io_mdio_read - Function to write in to MDIO registers
2904 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
2905 * @addr : address value
2906 * @dev : pointer to net_device structure
2908 * This function is used to read values to the MDIO registers
2911 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
2915 struct s2io_nic *sp = dev->priv;
2916 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2918 /* address transaction */
2919 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2920 | MDIO_MMD_DEV_ADDR(mmd_type)
2921 | MDIO_MMS_PRT_ADDR(0x0);
2922 writeq(val64, &bar0->mdio_control);
2923 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2924 writeq(val64, &bar0->mdio_control);
2927 /* Data transaction */
2929 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2930 | MDIO_MMD_DEV_ADDR(mmd_type)
2931 | MDIO_MMS_PRT_ADDR(0x0)
2932 | MDIO_OP(MDIO_OP_READ_TRANS);
2933 writeq(val64, &bar0->mdio_control);
2934 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2935 writeq(val64, &bar0->mdio_control);
2938 /* Read the value from regs */
2939 rval64 = readq(&bar0->mdio_control);
2940 rval64 = rval64 & 0xFFFF0000;
2941 rval64 = rval64 >> 16;
2945 * s2io_chk_xpak_counter - Function to check the status of the xpak counters
2946 * @counter : couter value to be updated
2947 * @flag : flag to indicate the status
2948 * @type : counter type
2950 * This function is to check the status of the xpak counters value
2954 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
2959 for(i = 0; i <index; i++)
2964 *counter = *counter + 1;
2965 val64 = *regs_stat & mask;
2966 val64 = val64 >> (index * 0x2);
2973 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
2974 "service. Excessive temperatures may "
2975 "result in premature transceiver "
2979 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
2980 "service Excessive bias currents may "
2981 "indicate imminent laser diode "
2985 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
2986 "service Excessive laser output "
2987 "power may saturate far-end "
2991 DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
2996 val64 = val64 << (index * 0x2);
2997 *regs_stat = (*regs_stat & (~mask)) | (val64);
3000 *regs_stat = *regs_stat & (~mask);
3005 * s2io_updt_xpak_counter - Function to update the xpak counters
3006 * @dev : pointer to net_device struct
3008 * This function is to upate the status of the xpak counters value
3011 static void s2io_updt_xpak_counter(struct net_device *dev)
3019 struct s2io_nic *sp = dev->priv;
3020 struct stat_block *stat_info = sp->mac_control.stats_info;
3022 /* Check the communication with the MDIO slave */
3025 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3026 if((val64 == 0xFFFF) || (val64 == 0x0000))
3028 DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
3029 "Returned %llx\n", (unsigned long long)val64);
3033 /* Check for the expecte value of 2040 at PMA address 0x0000 */
3036 DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
3037 DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
3038 (unsigned long long)val64);
3042 /* Loading the DOM register to MDIO register */
3044 s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
3045 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3047 /* Reading the Alarm flags */
3050 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3052 flag = CHECKBIT(val64, 0x7);
3054 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
3055 &stat_info->xpak_stat.xpak_regs_stat,
3058 if(CHECKBIT(val64, 0x6))
3059 stat_info->xpak_stat.alarm_transceiver_temp_low++;
3061 flag = CHECKBIT(val64, 0x3);
3063 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
3064 &stat_info->xpak_stat.xpak_regs_stat,
3067 if(CHECKBIT(val64, 0x2))
3068 stat_info->xpak_stat.alarm_laser_bias_current_low++;
3070 flag = CHECKBIT(val64, 0x1);
3072 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
3073 &stat_info->xpak_stat.xpak_regs_stat,
3076 if(CHECKBIT(val64, 0x0))
3077 stat_info->xpak_stat.alarm_laser_output_power_low++;
3079 /* Reading the Warning flags */
3082 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3084 if(CHECKBIT(val64, 0x7))
3085 stat_info->xpak_stat.warn_transceiver_temp_high++;
3087 if(CHECKBIT(val64, 0x6))
3088 stat_info->xpak_stat.warn_transceiver_temp_low++;
3090 if(CHECKBIT(val64, 0x3))
3091 stat_info->xpak_stat.warn_laser_bias_current_high++;
3093 if(CHECKBIT(val64, 0x2))
3094 stat_info->xpak_stat.warn_laser_bias_current_low++;
3096 if(CHECKBIT(val64, 0x1))
3097 stat_info->xpak_stat.warn_laser_output_power_high++;
3099 if(CHECKBIT(val64, 0x0))
3100 stat_info->xpak_stat.warn_laser_output_power_low++;
3104 * alarm_intr_handler - Alarm Interrrupt handler
3105 * @nic: device private variable
3106 * Description: If the interrupt was neither because of Rx packet or Tx
3107 * complete, this function is called. If the interrupt was to indicate
3108 * a loss of link, the OSM link status handler is invoked for any other
3109 * alarm interrupt the block that raised the interrupt is displayed
3110 * and a H/W reset is issued.
3115 static void alarm_intr_handler(struct s2io_nic *nic)
3117 struct net_device *dev = (struct net_device *) nic->dev;
3118 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3119 register u64 val64 = 0, err_reg = 0;
3122 if (atomic_read(&nic->card_state) == CARD_DOWN)
3124 nic->mac_control.stats_info->sw_stat.ring_full_cnt = 0;
3125 /* Handling the XPAK counters update */
3126 if(nic->mac_control.stats_info->xpak_stat.xpak_timer_count < 72000) {
3127 /* waiting for an hour */
3128 nic->mac_control.stats_info->xpak_stat.xpak_timer_count++;