1 /************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3 * Copyright(c) 2002-2005 Neterion Inc.
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
* questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
21 * Francois Romieu : For pointing out all code part that were
22 * deprecated and also styling related comments.
23 * Grant Grundler : For helping me get rid of some Architecture
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
27 * The module loadable parameters that are supported by the driver and a brief
* explanation of all the variables.
30 * rx_ring_num : This can be used to program the number of receive rings used
32 * rx_ring_sz: This defines the number of receive blocks each ring can have.
33 * This is also an array of size 8.
34 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35 * values are 1, 2 and 3.
* tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
37 * tx_fifo_len: This too is an array of 8. Each element defines the number of
38 * Tx descriptors that can be associated with each corresponding FIFO.
39 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40 * 1(MSI), 2(MSI_X). Default value is '0(INTA)'
41 * lro: Specifies whether to enable Large Receive Offload (LRO) or not.
42 * Possible values '1' for enable '0' for disable. Default is '0'
43 * lro_max_pkts: This parameter defines maximum number of packets can be
44 * aggregated as a single large packet
45 ************************************************************************/
47 #include <linux/config.h>
48 #include <linux/module.h>
49 #include <linux/types.h>
50 #include <linux/errno.h>
51 #include <linux/ioport.h>
52 #include <linux/pci.h>
53 #include <linux/dma-mapping.h>
54 #include <linux/kernel.h>
55 #include <linux/netdevice.h>
56 #include <linux/etherdevice.h>
57 #include <linux/skbuff.h>
58 #include <linux/init.h>
59 #include <linux/delay.h>
60 #include <linux/stddef.h>
61 #include <linux/ioctl.h>
62 #include <linux/timex.h>
63 #include <linux/sched.h>
64 #include <linux/ethtool.h>
65 #include <linux/workqueue.h>
66 #include <linux/if_vlan.h>
68 #include <linux/tcp.h>
71 #include <asm/system.h>
72 #include <asm/uaccess.h>
74 #include <asm/div64.h>
78 #include "s2io-regs.h"
#define DRV_VERSION "2.0.14.2"

/* S2io Driver name & version. */
static char s2io_driver_name[] = "Neterion";
static char s2io_driver_version[] = DRV_VERSION;

/* Per-RxD-mode lookup tables, indexed by nic->rxd_mode (4 modes).
 * rxd_size:  bytes occupied by one RxD in each mode.
 * rxd_count: RxDs that fit in one Rx block in each mode.
 * NOTE(review): exact mode encoding (1-buffer/3-buffer variants) is defined
 * elsewhere in the driver -- confirm against the RXD_MODE_* constants. */
static int rxd_size[4] = {32,48,48,64};
static int rxd_count[4] = {127,85,85,63};
/* Test whether an RxD has been handed back by the adapter: the descriptor
 * is no longer owned by the Xena and its marker field does not carry the
 * driver-written marker value. */
static inline int RXD_IS_UP2DT(RxD_t *rxdp)
	/* NOTE(review): opening/closing braces, the declaration of 'ret' and
	 * the final 'return ret;' were elided in this extraction. */
	ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
	       (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
/*
 * Cards with following subsystem_id have a link state indication
 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
 * macro below identifies these cards given the subsystem_id.
 *
 * Evaluates to 1 for an Xframe-I device whose subsystem id falls in either
 * faulty range, 0 otherwise.  The expansion and both parameters are fully
 * parenthesized: the original left the outer ?: and the 'subid'/'dev_type'
 * uses bare, so e.g. '!CARDS_WITH_FAULTY_LINK_INDICATORS(t, s)' or an
 * expression argument would have parsed incorrectly.
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid)	\
	(((dev_type) == XFRAME_I_DEVICE) ?			\
	 (((((subid) >= 0x600B) && ((subid) <= 0x600D)) ||	\
	   (((subid) >= 0x640B) && ((subid) <= 0x640D))) ? 1 : 0) : 0)
/* True when neither the remote nor the local RMAC fault bit is set in the
 * adapter-status value.  'val64' is parenthesized in the expansion (the
 * original expanded it bare, so an expression argument would misbind
 * against the '&'). */
#define LINK_IS_UP(val64) (!((val64) & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
					ADAPTER_STATUS_RMAC_LOCAL_FAULT)))

/* Atomically claim the tasklet; non-zero means it was already in use.
 * Deliberately unhygienic: expects a local 'sp' (nic_t *) in the caller. */
#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
/* Classify how full ring 'ring' is given 'rxb_size' unfilled RxDs, so the
 * Rx path can decide whether replenishment is urgent.  The distinct return
 * values (panic/low/ok levels) were elided in this extraction -- confirm
 * against the PANIC/LOW constants defined elsewhere in the driver. */
static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
	mac_info_t *mac_control;

	mac_control = &sp->mac_control;
	/* Fewer free RxDs than one block's worth -> critical. */
	if (rxb_size <= rxd_count[sp->rxd_mode])
	/* NOTE(review): the return statements for both branches and the
	 * function braces were elided in this extraction. */
	else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16)
/* Ethtool related variables and Macros. */
/* Names of the self-tests reported through ethtool (ETHTOOL_GSTRINGS). */
static char s2io_gstrings[][ETH_GSTRING_LEN] = {
	"Register test\t(offline)",
	"Eeprom test\t(offline)",
	"Link test\t(online)",
	"RLDRAM test\t(offline)",
	"BIST Test\t(offline)"
	/* NOTE(review): closing '};' elided in this extraction. */
/* Names of the statistics counters reported through ethtool -S, in the
 * exact order the values are copied out of the hardware stats block.
 * NOTE(review): many entries and the closing '};' were elided in this
 * extraction; the order below must stay in sync with the stats copy-out
 * code elsewhere in the file. */
static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
	{"tmac_data_octets"},
	{"tmac_pause_ctrl_frms"},
	{"tmac_any_err_frms"},
	{"tmac_ttl_less_fb_octets"},
	{"tmac_vld_ip_octets"},
	{"rmac_data_octets"},
	{"rmac_fcs_err_frms"},
	{"rmac_vld_mcst_frms"},
	{"rmac_vld_bcst_frms"},
	{"rmac_in_rng_len_err_frms"},
	{"rmac_out_rng_len_err_frms"},
	{"rmac_pause_ctrl_frms"},
	{"rmac_unsup_ctrl_frms"},
	{"rmac_accepted_ucst_frms"},
	{"rmac_accepted_nucst_frms"},
	{"rmac_discarded_frms"},
	{"rmac_drop_events"},
	{"rmac_ttl_less_fb_octets"},
	{"rmac_usized_frms"},
	{"rmac_osized_frms"},
	{"rmac_jabber_frms"},
	{"rmac_ttl_64_frms"},
	{"rmac_ttl_65_127_frms"},
	{"rmac_ttl_128_255_frms"},
	{"rmac_ttl_256_511_frms"},
	{"rmac_ttl_512_1023_frms"},
	{"rmac_ttl_1024_1518_frms"},
	{"rmac_err_drp_udp"},
	{"rmac_xgmii_err_sym"},
	{"rmac_xgmii_data_err_cnt"},
	{"rmac_xgmii_ctrl_err_cnt"},
	{"rmac_accepted_ip"},
	{"new_rd_req_rtry_cnt"},
	{"wr_rtry_rd_ack_cnt"},
	{"new_wr_req_rtry_cnt"},
	{"rd_rtry_wr_ack_cnt"},
	{"rmac_ttl_1519_4095_frms"},
	{"rmac_ttl_4096_8191_frms"},
	{"rmac_ttl_8192_max_frms"},
	{"rmac_ttl_gt_max_frms"},
	{"rmac_osized_alt_frms"},
	{"rmac_jabber_alt_frms"},
	{"rmac_gt_max_alt_frms"},
	{"rmac_len_discard"},
	{"rmac_fcs_discard"},
	{"rmac_red_discard"},
	{"rmac_rts_discard"},
	{"rmac_ingm_full_discard"},
	/* Driver-maintained (software) counters start here. */
	{"\n DRIVER STATISTICS"},
	{"single_bit_ecc_errs"},
	{"double_bit_ecc_errs"},
	/* The entries below use parenthesized string initializers in the
	 * original source; preserved verbatim. */
	("alarm_transceiver_temp_high"),
	("alarm_transceiver_temp_low"),
	("alarm_laser_bias_current_high"),
	("alarm_laser_bias_current_low"),
	("alarm_laser_output_power_high"),
	("alarm_laser_output_power_low"),
	("warn_transceiver_temp_high"),
	("warn_transceiver_temp_low"),
	("warn_laser_bias_current_high"),
	("warn_laser_bias_current_low"),
	("warn_laser_output_power_high"),
	("warn_laser_output_power_low"),
	("lro_aggregated_pkts"),
	("lro_flush_both_count"),
	("lro_out_of_sequence_pkts"),
	("lro_flush_due_to_max_pkts"),
	("lro_avg_aggr_pkts"),
/* Entry counts / byte lengths derived from the ethtool string tables
 * above.  Each expansion is fully parenthesized so the macros compose
 * safely inside larger expressions (the originals were bare quotients and
 * products, so e.g. 'x / S2IO_STAT_STRINGS_LEN' would have mis-associated). */
#define S2IO_STAT_LEN		(sizeof(ethtool_stats_keys) / ETH_GSTRING_LEN)
#define S2IO_STAT_STRINGS_LEN	(S2IO_STAT_LEN * ETH_GSTRING_LEN)

#define S2IO_TEST_LEN		(sizeof(s2io_gstrings) / ETH_GSTRING_LEN)
#define S2IO_STRINGS_LEN	(S2IO_TEST_LEN * ETH_GSTRING_LEN)
/* Initialize and arm a kernel timer: set handler and callback argument,
 * then schedule it 'exp' jiffies from now.  Wrapped in do { } while (0) so
 * the multi-statement expansion is safe inside an unbraced if/else (the
 * original expanded to bare statements and its last statement even lacked
 * a semicolon, silently swallowing the caller's next token). */
#define S2IO_TIMER_CONF(timer, handle, arg, exp)		\
	do {							\
		init_timer(&timer);				\
		timer.function = handle;			\
		timer.data = (unsigned long)(arg);		\
		mod_timer(&timer, (jiffies + (exp)));		\
	} while (0)
/* net_device vlan_rx_register hook: record the VLAN group under the Tx
 * lock so the transmit path sees a consistent pointer. */
static void s2io_vlan_rx_register(struct net_device *dev,
				  struct vlan_group *grp)
	nic_t *nic = dev->priv;
	/* NOTE(review): the 'unsigned long flags' declaration and the
	 * statement between lock and unlock (presumably storing 'grp')
	 * were elided in this extraction -- confirm against full source. */
	spin_lock_irqsave(&nic->tx_lock, flags);
	spin_unlock_irqrestore(&nic->tx_lock, flags);
/* Unregister the vlan */
/* net_device vlan_rx_kill_vid hook: drop the per-VID device pointer from
 * the VLAN group, serialized against the transmit path by tx_lock. */
static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
	nic_t *nic = dev->priv;
	/* NOTE(review): the 'unsigned long flags' declaration and the
	 * function braces were elided in this extraction. */
	spin_lock_irqsave(&nic->tx_lock, flags);
	nic->vlgrp->vlan_devices[vid] = NULL;
	spin_unlock_irqrestore(&nic->tx_lock, flags);
/*
 * Constants to be programmed into the Xena's registers, to configure
 * the transceiver interface (remainder of original comment elided in
 * this extraction).
 */
/* DTX control values written in sequence for Xframe-II (Herc); iteration
 * stops at an END_SIGN terminator whose entry -- along with the per-pair
 * comments and the closing '};' -- was elided in this extraction. */
static const u64 herc_act_dtx_cfg[] = {
	0x8000051536750000ULL, 0x80000515367500E0ULL,
	0x8000051536750004ULL, 0x80000515367500E4ULL,
	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
	0x801205150D440000ULL, 0x801205150D4400E0ULL,
	0x801205150D440004ULL, 0x801205150D4400E4ULL,
	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
/* DTX control values written in sequence for Xframe-I (Xena); like the
 * Herc table above, the END_SIGN terminator and closing '};' were elided
 * in this extraction. */
static const u64 xena_dtx_cfg[] = {
	0x8000051500000000ULL, 0x80000515000000E0ULL,
	0x80000515D9350004ULL, 0x80000515D93500E4ULL,
	0x8001051500000000ULL, 0x80010515000000E0ULL,
	0x80010515001E0004ULL, 0x80010515001E00E4ULL,
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
/*
 * Constants for Fixing the MacAddress problem seen mostly on
 * Alpha machines (remainder of original comment elided in this
 * extraction).
 */
/* Register-write sequence used as a MAC-address workaround; terminated by
 * an END_SIGN entry that -- with the closing '};' -- was elided here. */
static const u64 fix_mac[] = {
	0x0060000000000000ULL, 0x0060600000000000ULL,
	0x0040600000000000ULL, 0x0000600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0000600000000000ULL,
	0x0040600000000000ULL, 0x0060600000000000ULL,
/* Module Loadable parameters. */
/* Number of Tx FIFOs in use (1..MAX_TX_FIFOS). */
static unsigned int tx_fifo_num = 1;
/* Per-FIFO descriptor counts; FIFO 0 gets its own default. */
static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
    {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
/* Number of Rx rings in use (1..MAX_RX_RINGS). */
static unsigned int rx_ring_num = 1;
/* Per-ring Rx block counts. */
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
/* Per-ring frame-length steering; 0 = use device MTU. */
static unsigned int rts_frm_len[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = 0 };
/* Rx descriptor mode for all rings (1, 2 or 3 -- see file header). */
static unsigned int rx_ring_mode = 1;
static unsigned int use_continuous_tx_intrs = 1;
static unsigned int rmac_pause_time = 0x100;
static unsigned int mc_pause_threshold_q0q3 = 187;
static unsigned int mc_pause_threshold_q4q7 = 187;
static unsigned int shared_splits;
static unsigned int tmac_util_period = 5;
static unsigned int rmac_util_period = 5;
static unsigned int bimodal = 0;
static unsigned int l3l4hdr_size = 128;
#ifndef CONFIG_S2IO_NAPI
static unsigned int indicate_max_pkts;
/* NOTE(review): the matching #endif was elided in this extraction. */
/* Frequency of Rx desc syncs expressed as power of 2 */
static unsigned int rxsync_frequency = 3;
/* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */
static unsigned int intr_type = 0;
/* Large receive offload feature */
static unsigned int lro = 0;
/* Max pkts to be aggregated by LRO at one time. If not specified,
 * aggregation happens until we hit max IP pkt size(64K)
 */
static unsigned int lro_max_pkts = 0xFFFF;
/*
 * This table lists all the devices that this driver supports.
 */
/* PCI IDs matched by this driver: Xframe-I and Xframe-II (Herc), each in
 * its "WIN" and "UNI" variant.  NOTE(review): the terminating all-zero
 * entry and the closing '};' were elided in this extraction. */
static struct pci_device_id s2io_tbl[] __devinitdata = {
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},

MODULE_DEVICE_TABLE(pci, s2io_tbl);
/* PCI driver registration: probe/remove entry points and the ID table
 * above.  NOTE(review): the '.name' initializer and closing '};' were
 * elided in this extraction. */
static struct pci_driver s2io_driver = {
	.id_table = s2io_tbl,
	.probe = s2io_init_nic,
	.remove = __devexit_p(s2io_rem_nic),
/* A simplifier macro used both by init and free shared_mem Fns(). */
/* Pages needed to hold 'len' items at 'per_each' items per page, i.e.
 * ceiling division.  Both parameters are parenthesized in the expansion:
 * the original divided by a bare 'per_each', so an expression argument
 * such as 'a * b' would have mis-associated ((x / a) * b). */
#define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
/**
 * init_shared_mem - Allocation and Initialization of Memory
 * @nic: Device private variable.
 * Description: The function allocates all the memory areas shared
 * between the NIC and the driver. This includes Tx descriptors,
 * Rx descriptors and the statistics block.
 *
 * NOTE(review): many interior lines of this function (closing braces,
 * error returns, second/third arguments of multi-line calls) were elided
 * in this extraction; the surviving lines are preserved verbatim, so
 * braces and call continuations below do not balance.
 */
static int init_shared_mem(struct s2io_nic *nic)
	void *tmp_v_addr, *tmp_v_addr_next;
	dma_addr_t tmp_p_addr, tmp_p_addr_next;
	RxD_block_t *pre_rxd_blk = NULL;
	int i, j, blk_cnt, rx_sz, tx_sz;
	int lst_size, lst_per_page;
	struct net_device *dev = nic->dev;
	mac_info_t *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* Allocation and initialization of TXDLs in FIFOs */
	/* Sum all FIFO lengths and reject configurations exceeding the
	 * hardware limit. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		size += config->tx_cfg[i].fifo_len;
	if (size > MAX_AVAILABLE_TXDS) {
		DBG_PRINT(ERR_DBG, "%s: Requested TxDs too high, ",
		DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);

	/* One TxDL (descriptor list) per Tx slot; pack as many lists as
	 * fit in one page. */
	lst_size = (sizeof(TxD_t) * config->max_txds);
	tx_sz = lst_size * size;
	lst_per_page = PAGE_SIZE / lst_size;

	/* Per-FIFO bookkeeping array (one list_info_hold_t per slot). */
	for (i = 0; i < config->tx_fifo_num; i++) {
		int fifo_len = config->tx_cfg[i].fifo_len;
		int list_holder_size = fifo_len * sizeof(list_info_hold_t);
		mac_control->fifos[i].list_info = kmalloc(list_holder_size,
		if (!mac_control->fifos[i].list_info) {
			"Malloc failed for list_info\n");
		memset(mac_control->fifos[i].list_info, 0, list_holder_size);

	/* Allocate DMA-coherent pages for the TxDLs and point each slot's
	 * list_info at its share of the page. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
		mac_control->fifos[i].tx_curr_put_info.offset = 0;
		mac_control->fifos[i].tx_curr_put_info.fifo_len =
			config->tx_cfg[i].fifo_len - 1;
		mac_control->fifos[i].tx_curr_get_info.offset = 0;
		mac_control->fifos[i].tx_curr_get_info.fifo_len =
			config->tx_cfg[i].fifo_len - 1;
		mac_control->fifos[i].fifo_no = i;
		mac_control->fifos[i].nic = nic;
		/* One TxD per fragment, plus header and terminator. */
		mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;

		for (j = 0; j < page_num; j++) {
			tmp_v = pci_alloc_consistent(nic->pdev,
				"pci_alloc_consistent ");
			DBG_PRINT(ERR_DBG, "failed for TxDL\n");
			/* If we got a zero DMA address(can happen on
			 * certain platforms like PPC), reallocate.
			 * Store virtual address of page we don't want,
			 * so it can be freed later in free_shared_mem().
			 */
			mac_control->zerodma_virt_addr = tmp_v;
				"%s: Zero DMA address for TxDL. ", dev->name);
				"Virtual address %p\n", tmp_v);
			tmp_v = pci_alloc_consistent(nic->pdev,
				"pci_alloc_consistent ");
			DBG_PRINT(ERR_DBG, "failed for TxDL\n");
			while (k < lst_per_page) {
				int l = (j * lst_per_page) + k;
				if (l == config->tx_cfg[i].fifo_len)
				mac_control->fifos[i].list_info[l].list_virt_addr =
					tmp_v + (k * lst_size);
				mac_control->fifos[i].list_info[l].list_phy_addr =
					tmp_p + (k * lst_size);

	/* Scratch area used by the UFO (UDP fragmentation offload) path. */
	nic->ufo_in_band_v = kmalloc((sizeof(u64) * size), GFP_KERNEL);
	if (!nic->ufo_in_band_v)

	/* Allocation and initialization of RXDs in Rings */
	/* Ring sizes must be a whole number of Rx blocks (one slot per
	 * block is consumed by the block-link descriptor, hence the +1). */
	for (i = 0; i < config->rx_ring_num; i++) {
		if (config->rx_cfg[i].num_rxd %
		    (rxd_count[nic->rxd_mode] + 1)) {
			DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
			DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
			DBG_PRINT(ERR_DBG, "RxDs per Block");
		size += config->rx_cfg[i].num_rxd;
		mac_control->rings[i].block_count =
			config->rx_cfg[i].num_rxd /
			(rxd_count[nic->rxd_mode] + 1 );
		mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
			mac_control->rings[i].block_count;
	if (nic->rxd_mode == RXD_MODE_1)
		size = (size * (sizeof(RxD1_t)));
		size = (size * (sizeof(RxD3_t)));

	for (i = 0; i < config->rx_ring_num; i++) {
		mac_control->rings[i].rx_curr_get_info.block_index = 0;
		mac_control->rings[i].rx_curr_get_info.offset = 0;
		mac_control->rings[i].rx_curr_get_info.ring_len =
			config->rx_cfg[i].num_rxd - 1;
		mac_control->rings[i].rx_curr_put_info.block_index = 0;
		mac_control->rings[i].rx_curr_put_info.offset = 0;
		mac_control->rings[i].rx_curr_put_info.ring_len =
			config->rx_cfg[i].num_rxd - 1;
		mac_control->rings[i].nic = nic;
		mac_control->rings[i].ring_no = i;

		blk_cnt = config->rx_cfg[i].num_rxd /
			(rxd_count[nic->rxd_mode] + 1);
		/* Allocating all the Rx blocks */
		for (j = 0; j < blk_cnt; j++) {
			rx_block_info_t *rx_blocks;
			rx_blocks = &mac_control->rings[i].rx_blocks[j];
			size = SIZE_OF_BLOCK; //size is always page size
			tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
			if (tmp_v_addr == NULL) {
				/*
				 * In case of failure, free_shared_mem()
				 * is called, which should free any
				 * memory that was alloced till the
				 * failure happened.
				 */
				rx_blocks->block_virt_addr = tmp_v_addr;
			memset(tmp_v_addr, 0, size);
			rx_blocks->block_virt_addr = tmp_v_addr;
			rx_blocks->block_dma_addr = tmp_p_addr;
			/* Per-RxD virtual/DMA address lookup within the block. */
			rx_blocks->rxds = kmalloc(sizeof(rxd_info_t)*
						  rxd_count[nic->rxd_mode],
			for (l=0; l<rxd_count[nic->rxd_mode];l++) {
				rx_blocks->rxds[l].virt_addr =
					rx_blocks->block_virt_addr +
					(rxd_size[nic->rxd_mode] * l);
				rx_blocks->rxds[l].dma_addr =
					rx_blocks->block_dma_addr +
					(rxd_size[nic->rxd_mode] * l);
		/* Interlinking all Rx Blocks */
		/* Chain each block's trailing link descriptor to the next
		 * block (circularly, via the % blk_cnt). */
		for (j = 0; j < blk_cnt; j++) {
			mac_control->rings[i].rx_blocks[j].block_virt_addr;
			mac_control->rings[i].rx_blocks[(j + 1) %
				blk_cnt].block_virt_addr;
			mac_control->rings[i].rx_blocks[j].block_dma_addr;
			mac_control->rings[i].rx_blocks[(j + 1) %
				blk_cnt].block_dma_addr;
			pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
			pre_rxd_blk->reserved_2_pNext_RxD_block =
				(unsigned long) tmp_v_addr_next;
			pre_rxd_blk->pNext_RxD_Blk_physical =
				(u64) tmp_p_addr_next;
	if (nic->rxd_mode >= RXD_MODE_3A) {
		/*
		 * Allocation of Storages for buffer addresses in 2BUFF mode
		 * and the buffers as well.
		 */
		for (i = 0; i < config->rx_ring_num; i++) {
			blk_cnt = config->rx_cfg[i].num_rxd /
				(rxd_count[nic->rxd_mode]+ 1);
			mac_control->rings[i].ba =
				kmalloc((sizeof(buffAdd_t *) * blk_cnt),
			if (!mac_control->rings[i].ba)
			for (j = 0; j < blk_cnt; j++) {
				mac_control->rings[i].ba[j] =
					kmalloc((sizeof(buffAdd_t) *
						 (rxd_count[nic->rxd_mode] + 1)),
				if (!mac_control->rings[i].ba[j])
				while (k != rxd_count[nic->rxd_mode]) {
					ba = &mac_control->rings[i].ba[j][k];
					/* Over-allocate by ALIGN_SIZE, then round the
					 * usable pointer down to the alignment boundary. */
					ba->ba_0_org = (void *) kmalloc
					    (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
					tmp = (unsigned long)ba->ba_0_org;
					tmp &= ~((unsigned long) ALIGN_SIZE);
					ba->ba_0 = (void *) tmp;
					ba->ba_1_org = (void *) kmalloc
					    (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
					tmp = (unsigned long) ba->ba_1_org;
					tmp &= ~((unsigned long) ALIGN_SIZE);
					ba->ba_1 = (void *) tmp;

	/* Allocation and initialization of Statistics block */
	size = sizeof(StatInfo_t);
	mac_control->stats_mem = pci_alloc_consistent
	    (nic->pdev, size, &mac_control->stats_mem_phy);
	if (!mac_control->stats_mem) {
		/*
		 * In case of failure, free_shared_mem() is called, which
		 * should free any memory that was alloced till the
		 * failure happened.
		 */
	mac_control->stats_mem_sz = size;

	tmp_v_addr = mac_control->stats_mem;
	mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
	memset(tmp_v_addr, 0, size);
	DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
		  (unsigned long long) tmp_p_addr);
/**
 * free_shared_mem - Free the allocated Memory
 * @nic: Device private variable.
 * Description: This function is to free all memory locations allocated by
 * the init_shared_mem() function and return it to the kernel.
 *
 * NOTE(review): many interior lines (closing braces, continuation
 * arguments of multi-line calls, 'continue'/'break' statements) were
 * elided in this extraction; the surviving lines are preserved verbatim,
 * so braces below do not balance.
 */
static void free_shared_mem(struct s2io_nic *nic)
	int i, j, blk_cnt, size;
	dma_addr_t tmp_p_addr;
	mac_info_t *mac_control;
	struct config_param *config;
	int lst_size, lst_per_page;
	struct net_device *dev = nic->dev;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* Mirror of the TxDL layout computed in init_shared_mem(). */
	lst_size = (sizeof(TxD_t) * config->max_txds);
	lst_per_page = PAGE_SIZE / lst_size;

	/* Free the per-FIFO TxDL pages and bookkeeping arrays. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
		for (j = 0; j < page_num; j++) {
			int mem_blks = (j * lst_per_page);
			if (!mac_control->fifos[i].list_info)
			if (!mac_control->fifos[i].list_info[mem_blks].
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    mac_control->fifos[i].
					    mac_control->fifos[i].
		/* If we got a zero DMA address during allocation,
		 * free the page we stashed for it. */
		if (mac_control->zerodma_virt_addr) {
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    mac_control->zerodma_virt_addr,
				"%s: Freeing TxDL with zero DMA addr. ",
			DBG_PRINT(INIT_DBG, "Virtual address %p\n",
				  mac_control->zerodma_virt_addr);
		kfree(mac_control->fifos[i].list_info);

	/* Free the DMA-coherent Rx blocks and their RxD lookup arrays. */
	size = SIZE_OF_BLOCK;
	for (i = 0; i < config->rx_ring_num; i++) {
		blk_cnt = mac_control->rings[i].block_count;
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr = mac_control->rings[i].rx_blocks[j].
			tmp_p_addr = mac_control->rings[i].rx_blocks[j].
			if (tmp_v_addr == NULL)
			pci_free_consistent(nic->pdev, size,
					    tmp_v_addr, tmp_p_addr);
			kfree(mac_control->rings[i].rx_blocks[j].rxds);

	if (nic->rxd_mode >= RXD_MODE_3A) {
		/* Freeing buffer storage addresses in 2BUFF mode. */
		for (i = 0; i < config->rx_ring_num; i++) {
			blk_cnt = config->rx_cfg[i].num_rxd /
				(rxd_count[nic->rxd_mode] + 1);
			for (j = 0; j < blk_cnt; j++) {
				if (!mac_control->rings[i].ba[j])
				while (k != rxd_count[nic->rxd_mode]) {
					&mac_control->rings[i].ba[j][k];
				kfree(mac_control->rings[i].ba[j]);
			kfree(mac_control->rings[i].ba);

	/* Statistics block. */
	if (mac_control->stats_mem) {
		pci_free_consistent(nic->pdev,
				    mac_control->stats_mem_sz,
				    mac_control->stats_mem,
				    mac_control->stats_mem_phy);
	/* NOTE(review): kfree(NULL) is a no-op, so this guard is redundant. */
	if (nic->ufo_in_band_v)
		kfree(nic->ufo_in_band_v);
/*
 * s2io_verify_pci_mode -
 * Read the PCI mode field from BAR0; return -1 for an unrecognized mode,
 * otherwise the decoded mode value.
 */
static int s2io_verify_pci_mode(nic_t *nic)
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	/* NOTE(review): the declaration of 'mode', the success-path return
	 * and the function braces were elided in this extraction. */

	val64 = readq(&bar0->pci_mode);
	mode = (u8)GET_PCI_MODE(val64);

	if ( val64 & PCI_MODE_UNKNOWN_MODE)
		return -1;      /* Unknown PCI mode */
/* PCI vendor/device id of the NEC PCI-E-to-PCI-X bridge. */
#define NEC_VENID 0x1033
#define NEC_DEVID 0x0125
/* Scan all PCI devices; reports whether the s2io device sits directly
 * behind a NEC bridge (its parent bus belongs to the NEC device). */
static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
	struct pci_dev *tdev = NULL;
	while ((tdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
		if ((tdev->vendor == NEC_VENID) && (tdev->device == NEC_DEVID)){
			if (tdev->bus == s2io_pdev->bus->parent)
	/* NOTE(review): the return statements and closing braces were
	 * elided in this extraction. */
855 int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
/*
 * s2io_print_pci_mode -
 * Decode the PCI mode register, record the bus speed in the device
 * config, and log the bus width and speed; returns -1 for an unknown
 * mode or unsupported speed.
 */
static int s2io_print_pci_mode(nic_t *nic)
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	struct config_param *config = &nic->config;
	/* NOTE(review): the 'mode' declaration, the 'switch (mode)' header,
	 * 'break' statements, braces and the success-path return were
	 * elided in this extraction. */

	val64 = readq(&bar0->pci_mode);
	mode = (u8)GET_PCI_MODE(val64);

	if ( val64 & PCI_MODE_UNKNOWN_MODE)
		return -1;      /* Unknown PCI mode */

	config->bus_speed = bus_speed[mode];

	if (s2io_on_nec_bridge(nic->pdev)) {
		DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",

	if (val64 & PCI_MODE_32_BITS) {
		DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
		DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);

	/* PCI-X mode 2 cases report the doubled effective clock rate. */
	case PCI_MODE_PCI_33:
		DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
	case PCI_MODE_PCI_66:
		DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
	case PCI_MODE_PCIX_M1_66:
		DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
	case PCI_MODE_PCIX_M1_100:
		DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
	case PCI_MODE_PCIX_M1_133:
		DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
	case PCI_MODE_PCIX_M2_66:
		DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
	case PCI_MODE_PCIX_M2_100:
		DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
	case PCI_MODE_PCIX_M2_133:
		DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
		return -1;      /* Unsupported bus speed */
919 * init_nic - Initialization of hardware
* @nic: device private variable
921 * Description: The function sequentially configures every block
922 * of the H/W from their reset values.
923 * Return Value: SUCCESS on success and
924 * '-1' on failure (endian settings incorrect).
927 static int init_nic(struct s2io_nic *nic)
929 XENA_dev_config_t __iomem *bar0 = nic->bar0;
930 struct net_device *dev = nic->dev;
931 register u64 val64 = 0;
935 mac_info_t *mac_control;
936 struct config_param *config;
938 unsigned long long mem_share;
941 mac_control = &nic->mac_control;
942 config = &nic->config;
/* to set the swapper control on the card */
945 if(s2io_set_swapper(nic)) {
946 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
951 * Herc requires EOI to be removed from reset before XGXS, so..
953 if (nic->device_type & XFRAME_II_DEVICE) {
954 val64 = 0xA500000000ULL;
955 writeq(val64, &bar0->sw_reset);
957 val64 = readq(&bar0->sw_reset);
960 /* Remove XGXS from reset state */
962 writeq(val64, &bar0->sw_reset);
964 val64 = readq(&bar0->sw_reset);
966 /* Enable Receiving broadcasts */
967 add = &bar0->mac_cfg;
968 val64 = readq(&bar0->mac_cfg);
969 val64 |= MAC_RMAC_BCAST_ENABLE;
970 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
971 writel((u32) val64, add);
972 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
973 writel((u32) (val64 >> 32), (add + 4));
975 /* Read registers in all blocks */
976 val64 = readq(&bar0->mac_int_mask);
977 val64 = readq(&bar0->mc_int_mask);
978 val64 = readq(&bar0->xgxs_int_mask);
982 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
984 if (nic->device_type & XFRAME_II_DEVICE) {
985 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
986 SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
987 &bar0->dtx_control, UF);
989 msleep(1); /* Necessary!! */
993 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
994 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
995 &bar0->dtx_control, UF);
996 val64 = readq(&bar0->dtx_control);
1001 /* Tx DMA Initialization */
1003 writeq(val64, &bar0->tx_fifo_partition_0);
1004 writeq(val64, &bar0->tx_fifo_partition_1);
1005 writeq(val64, &bar0->tx_fifo_partition_2);
1006 writeq(val64, &bar0->tx_fifo_partition_3);
1009 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1011 vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
1012 13) | vBIT(config->tx_cfg[i].fifo_priority,
1015 if (i == (config->tx_fifo_num - 1)) {
1022 writeq(val64, &bar0->tx_fifo_partition_0);
1026 writeq(val64, &bar0->tx_fifo_partition_1);
1030 writeq(val64, &bar0->tx_fifo_partition_2);
1034 writeq(val64, &bar0->tx_fifo_partition_3);
1040 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1041 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1043 if ((nic->device_type == XFRAME_I_DEVICE) &&
1044 (get_xena_rev_id(nic->pdev) < 4))
1045 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1047 val64 = readq(&bar0->tx_fifo_partition_0);
1048 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1049 &bar0->tx_fifo_partition_0, (unsigned long long) val64);
1052 * Initialization of Tx_PA_CONFIG register to ignore packet
1053 * integrity checking.
1055 val64 = readq(&bar0->tx_pa_cfg);
1056 val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
1057 TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
1058 writeq(val64, &bar0->tx_pa_cfg);
/* Rx DMA initialization. */
1062 for (i = 0; i < config->rx_ring_num; i++) {
1064 vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
1067 writeq(val64, &bar0->rx_queue_priority);
1070 * Allocating equal share of memory to all the
1074 if (nic->device_type & XFRAME_II_DEVICE)
1079 for (i = 0; i < config->rx_ring_num; i++) {
1082 mem_share = (mem_size / config->rx_ring_num +
1083 mem_size % config->rx_ring_num);
1084 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1087 mem_share = (mem_size / config->rx_ring_num);
1088 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1091 mem_share = (mem_size / config->rx_ring_num);
1092 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1095 mem_share = (mem_size / config->rx_ring_num);
1096 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1099 mem_share = (mem_size / config->rx_ring_num);
1100 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1103 mem_share = (mem_size / config->rx_ring_num);
1104 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1107 mem_share = (mem_size / config->rx_ring_num);
1108 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1111 mem_share = (mem_size / config->rx_ring_num);
1112 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1116 writeq(val64, &bar0->rx_queue_cfg);
1119 * Filling Tx round robin registers
1120 * as per the number of FIFOs
1122 switch (config->tx_fifo_num) {
1124 val64 = 0x0000000000000000ULL;
1125 writeq(val64, &bar0->tx_w_round_robin_0);
1126 writeq(val64, &bar0->tx_w_round_robin_1);
1127 writeq(val64, &bar0->tx_w_round_robin_2);
1128 writeq(val64, &bar0->tx_w_round_robin_3);
1129 writeq(val64, &bar0->tx_w_round_robin_4);
1132 val64 = 0x0000010000010000ULL;
1133 writeq(val64, &bar0->tx_w_round_robin_0);
1134 val64 = 0x0100000100000100ULL;
1135 writeq(val64, &bar0->tx_w_round_robin_1);
1136 val64 = 0x0001000001000001ULL;
1137 writeq(val64, &bar0->tx_w_round_robin_2);
1138 val64 = 0x0000010000010000ULL;
1139 writeq(val64, &bar0->tx_w_round_robin_3);
1140 val64 = 0x0100000000000000ULL;
1141 writeq(val64, &bar0->tx_w_round_robin_4);
1144 val64 = 0x0001000102000001ULL;
1145 writeq(val64, &bar0->tx_w_round_robin_0);
1146 val64 = 0x0001020000010001ULL;
1147 writeq(val64, &bar0->tx_w_round_robin_1);
1148 val64 = 0x0200000100010200ULL;
1149 writeq(val64, &bar0->tx_w_round_robin_2);
1150 val64 = 0x0001000102000001ULL;
1151 writeq(val64, &bar0->tx_w_round_robin_3);
1152 val64 = 0x0001020000000000ULL;
1153 writeq(val64, &bar0->tx_w_round_robin_4);
1156 val64 = 0x0001020300010200ULL;
1157 writeq(val64, &bar0->tx_w_round_robin_0);
1158 val64 = 0x0100000102030001ULL;
1159 writeq(val64, &bar0->tx_w_round_robin_1);
1160 val64 = 0x0200010000010203ULL;
1161 writeq(val64, &bar0->tx_w_round_robin_2);
1162 val64 = 0x0001020001000001ULL;
1163 writeq(val64, &bar0->tx_w_round_robin_3);
1164 val64 = 0x0203000100000000ULL;
1165 writeq(val64, &bar0->tx_w_round_robin_4);
1168 val64 = 0x0001000203000102ULL;
1169 writeq(val64, &bar0->tx_w_round_robin_0);
1170 val64 = 0x0001020001030004ULL;
1171 writeq(val64, &bar0->tx_w_round_robin_1);
1172 val64 = 0x0001000203000102ULL;
1173 writeq(val64, &bar0->tx_w_round_robin_2);
1174 val64 = 0x0001020001030004ULL;
1175 writeq(val64, &bar0->tx_w_round_robin_3);
1176 val64 = 0x0001000000000000ULL;
1177 writeq(val64, &bar0->tx_w_round_robin_4);
1180 val64 = 0x0001020304000102ULL;
1181 writeq(val64, &bar0->tx_w_round_robin_0);
1182 val64 = 0x0304050001020001ULL;
1183 writeq(val64, &bar0->tx_w_round_robin_1);
1184 val64 = 0x0203000100000102ULL;
1185 writeq(val64, &bar0->tx_w_round_robin_2);
1186 val64 = 0x0304000102030405ULL;
1187 writeq(val64, &bar0->tx_w_round_robin_3);
1188 val64 = 0x0001000200000000ULL;
1189 writeq(val64, &bar0->tx_w_round_robin_4);
1192 val64 = 0x0001020001020300ULL;
1193 writeq(val64, &bar0->tx_w_round_robin_0);
1194 val64 = 0x0102030400010203ULL;
1195 writeq(val64, &bar0->tx_w_round_robin_1);
1196 val64 = 0x0405060001020001ULL;
1197 writeq(val64, &bar0->tx_w_round_robin_2);
1198 val64 = 0x0304050000010200ULL;
1199 writeq(val64, &bar0->tx_w_round_robin_3);
1200 val64 = 0x0102030000000000ULL;
1201 writeq(val64, &bar0->tx_w_round_robin_4);
1204 val64 = 0x0001020300040105ULL;
1205 writeq(val64, &bar0->tx_w_round_robin_0);
1206 val64 = 0x0200030106000204ULL;
1207 writeq(val64, &bar0->tx_w_round_robin_1);
1208 val64 = 0x0103000502010007ULL;
1209 writeq(val64, &bar0->tx_w_round_robin_2);
1210 val64 = 0x0304010002060500ULL;
1211 writeq(val64, &bar0->tx_w_round_robin_3);
1212 val64 = 0x0103020400000000ULL;
1213 writeq(val64, &bar0->tx_w_round_robin_4);
1217 /* Enable Tx FIFO partition 0. */
1218 val64 = readq(&bar0->tx_fifo_partition_0);
1219 val64 |= (TX_FIFO_PARTITION_EN);
1220 writeq(val64, &bar0->tx_fifo_partition_0);
1222 /* Filling the Rx round robin registers as per the
1223 * number of Rings and steering based on QoS.
1225 switch (config->rx_ring_num) {
1227 val64 = 0x8080808080808080ULL;
1228 writeq(val64, &bar0->rts_qos_steering);
1231 val64 = 0x0000010000010000ULL;
1232 writeq(val64, &bar0->rx_w_round_robin_0);
1233 val64 = 0x0100000100000100ULL;
1234 writeq(val64, &bar0->rx_w_round_robin_1);
1235 val64 = 0x0001000001000001ULL;
1236 writeq(val64, &bar0->rx_w_round_robin_2);
1237 val64 = 0x0000010000010000ULL;
1238 writeq(val64, &bar0->rx_w_round_robin_3);
1239 val64 = 0x0100000000000000ULL;
1240 writeq(val64, &bar0->rx_w_round_robin_4);
1242 val64 = 0x8080808040404040ULL;
1243 writeq(val64, &bar0->rts_qos_steering);
1246 val64 = 0x0001000102000001ULL;
1247 writeq(val64, &bar0->rx_w_round_robin_0);
1248 val64 = 0x0001020000010001ULL;
1249 writeq(val64, &bar0->rx_w_round_robin_1);
1250 val64 = 0x0200000100010200ULL;
1251 writeq(val64, &bar0->rx_w_round_robin_2);
1252 val64 = 0x0001000102000001ULL;
1253 writeq(val64, &bar0->rx_w_round_robin_3);
1254 val64 = 0x0001020000000000ULL;
1255 writeq(val64, &bar0->rx_w_round_robin_4);
1257 val64 = 0x8080804040402020ULL;
1258 writeq(val64, &bar0->rts_qos_steering);
1261 val64 = 0x0001020300010200ULL;
1262 writeq(val64, &bar0->rx_w_round_robin_0);
1263 val64 = 0x0100000102030001ULL;
1264 writeq(val64, &bar0->rx_w_round_robin_1);
1265 val64 = 0x0200010000010203ULL;
1266 writeq(val64, &bar0->rx_w_round_robin_2);
1267 val64 = 0x0001020001000001ULL;
1268 writeq(val64, &bar0->rx_w_round_robin_3);
1269 val64 = 0x0203000100000000ULL;
1270 writeq(val64, &bar0->rx_w_round_robin_4);
1272 val64 = 0x8080404020201010ULL;
1273 writeq(val64, &bar0->rts_qos_steering);
1276 val64 = 0x0001000203000102ULL;
1277 writeq(val64, &bar0->rx_w_round_robin_0);
1278 val64 = 0x0001020001030004ULL;
1279 writeq(val64, &bar0->rx_w_round_robin_1);
1280 val64 = 0x0001000203000102ULL;
1281 writeq(val64, &bar0->rx_w_round_robin_2);
1282 val64 = 0x0001020001030004ULL;
1283 writeq(val64, &bar0->rx_w_round_robin_3);
1284 val64 = 0x0001000000000000ULL;
1285 writeq(val64, &bar0->rx_w_round_robin_4);
1287 val64 = 0x8080404020201008ULL;
1288 writeq(val64, &bar0->rts_qos_steering);
1291 val64 = 0x0001020304000102ULL;
1292 writeq(val64, &bar0->rx_w_round_robin_0);
1293 val64 = 0x0304050001020001ULL;
1294 writeq(val64, &bar0->rx_w_round_robin_1);
1295 val64 = 0x0203000100000102ULL;
1296 writeq(val64, &bar0->rx_w_round_robin_2);
1297 val64 = 0x0304000102030405ULL;
1298 writeq(val64, &bar0->rx_w_round_robin_3);
1299 val64 = 0x0001000200000000ULL;
1300 writeq(val64, &bar0->rx_w_round_robin_4);
1302 val64 = 0x8080404020100804ULL;
1303 writeq(val64, &bar0->rts_qos_steering);
1306 val64 = 0x0001020001020300ULL;
1307 writeq(val64, &bar0->rx_w_round_robin_0);
1308 val64 = 0x0102030400010203ULL;
1309 writeq(val64, &bar0->rx_w_round_robin_1);
1310 val64 = 0x0405060001020001ULL;
1311 writeq(val64, &bar0->rx_w_round_robin_2);
1312 val64 = 0x0304050000010200ULL;
1313 writeq(val64, &bar0->rx_w_round_robin_3);
1314 val64 = 0x0102030000000000ULL;
1315 writeq(val64, &bar0->rx_w_round_robin_4);
1317 val64 = 0x8080402010080402ULL;
1318 writeq(val64, &bar0->rts_qos_steering);
1321 val64 = 0x0001020300040105ULL;
1322 writeq(val64, &bar0->rx_w_round_robin_0);
1323 val64 = 0x0200030106000204ULL;
1324 writeq(val64, &bar0->rx_w_round_robin_1);
1325 val64 = 0x0103000502010007ULL;
1326 writeq(val64, &bar0->rx_w_round_robin_2);
1327 val64 = 0x0304010002060500ULL;
1328 writeq(val64, &bar0->rx_w_round_robin_3);
1329 val64 = 0x0103020400000000ULL;
1330 writeq(val64, &bar0->rx_w_round_robin_4);
1332 val64 = 0x8040201008040201ULL;
1333 writeq(val64, &bar0->rts_qos_steering);
1339 for (i = 0; i < 8; i++)
1340 writeq(val64, &bar0->rts_frm_len_n[i]);
1342 /* Set the default rts frame length for the rings configured */
1343 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1344 for (i = 0 ; i < config->rx_ring_num ; i++)
1345 writeq(val64, &bar0->rts_frm_len_n[i]);
1347 /* Set the frame length for the configured rings
1348 * desired by the user
1350 for (i = 0; i < config->rx_ring_num; i++) {
1351 /* If rts_frm_len[i] == 0 then it is assumed that user not
1352 * specified frame length steering.
1353 * If the user provides the frame length then program
1354 * the rts_frm_len register for those values or else
1355 * leave it as it is.
1357 if (rts_frm_len[i] != 0) {
1358 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1359 &bar0->rts_frm_len_n[i]);
1363 /* Program statistics memory */
1364 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1366 if (nic->device_type == XFRAME_II_DEVICE) {
1367 val64 = STAT_BC(0x320);
1368 writeq(val64, &bar0->stat_byte_cnt);
1372 * Initializing the sampling rate for the device to calculate the
1373 * bandwidth utilization.
1375 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1376 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1377 writeq(val64, &bar0->mac_link_util);
1381 * Initializing the Transmit and Receive Traffic Interrupt
1385 * TTI Initialization. Default Tx timer gets us about
1386 * 250 interrupts per sec. Continuous interrupts are enabled
1389 if (nic->device_type == XFRAME_II_DEVICE) {
1390 int count = (nic->config.bus_speed * 125)/2;
1391 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1394 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1396 val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1397 TTI_DATA1_MEM_TX_URNG_B(0x10) |
1398 TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1399 if (use_continuous_tx_intrs)
1400 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1401 writeq(val64, &bar0->tti_data1_mem);
1403 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1404 TTI_DATA2_MEM_TX_UFC_B(0x20) |
1405 TTI_DATA2_MEM_TX_UFC_C(0x70) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1406 writeq(val64, &bar0->tti_data2_mem);
1408 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1409 writeq(val64, &bar0->tti_command_mem);
1412 * Once the operation completes, the Strobe bit of the command
1413 * register will be reset. We poll for this particular condition
1414 * We wait for a maximum of 500ms for the operation to complete,
1415 * if it's not complete by then we return error.
1419 val64 = readq(&bar0->tti_command_mem);
1420 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1424 DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1432 if (nic->config.bimodal) {
1434 for (k = 0; k < config->rx_ring_num; k++) {
1435 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1436 val64 |= TTI_CMD_MEM_OFFSET(0x38+k);
1437 writeq(val64, &bar0->tti_command_mem);
1440 * Once the operation completes, the Strobe bit of the command
1441 * register will be reset. We poll for this particular condition
1442 * We wait for a maximum of 500ms for the operation to complete,
1443 * if it's not complete by then we return error.
1447 val64 = readq(&bar0->tti_command_mem);
1448 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1453 "%s: TTI init Failed\n",
1463 /* RTI Initialization */
1464 if (nic->device_type == XFRAME_II_DEVICE) {
1466 * Programmed to generate Apprx 500 Intrs per
1469 int count = (nic->config.bus_speed * 125)/4;
1470 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1472 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1474 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1475 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1476 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1478 writeq(val64, &bar0->rti_data1_mem);
1480 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1481 RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1482 if (nic->intr_type == MSI_X)
1483 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1484 RTI_DATA2_MEM_RX_UFC_D(0x40));
1486 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1487 RTI_DATA2_MEM_RX_UFC_D(0x80));
1488 writeq(val64, &bar0->rti_data2_mem);
1490 for (i = 0; i < config->rx_ring_num; i++) {
1491 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1492 | RTI_CMD_MEM_OFFSET(i);
1493 writeq(val64, &bar0->rti_command_mem);
1496 * Once the operation completes, the Strobe bit of the
1497 * command register will be reset. We poll for this
1498 * particular condition. We wait for a maximum of 500ms
1499 * for the operation to complete, if it's not complete
1500 * by then we return error.
1504 val64 = readq(&bar0->rti_command_mem);
1505 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1509 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1520 * Initializing proper values as Pause threshold into all
1521 * the 8 Queues on Rx side.
1523 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1524 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1526 /* Disable RMAC PAD STRIPPING */
1527 add = &bar0->mac_cfg;
1528 val64 = readq(&bar0->mac_cfg);
1529 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1530 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1531 writel((u32) (val64), add);
1532 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1533 writel((u32) (val64 >> 32), (add + 4));
1534 val64 = readq(&bar0->mac_cfg);
1536 /* Enable FCS stripping by adapter */
1537 add = &bar0->mac_cfg;
1538 val64 = readq(&bar0->mac_cfg);
1539 val64 |= MAC_CFG_RMAC_STRIP_FCS;
1540 if (nic->device_type == XFRAME_II_DEVICE)
1541 writeq(val64, &bar0->mac_cfg);
1543 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1544 writel((u32) (val64), add);
1545 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1546 writel((u32) (val64 >> 32), (add + 4));
1550 * Set the time value to be inserted in the pause frame
1551 * generated by xena.
1553 val64 = readq(&bar0->rmac_pause_cfg);
1554 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1555 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1556 writeq(val64, &bar0->rmac_pause_cfg);
1559 * Set the Threshold Limit for Generating the pause frame
1560 * If the amount of data in any Queue exceeds ratio of
1561 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1562 * pause frame is generated
1565 for (i = 0; i < 4; i++) {
1567 (((u64) 0xFF00 | nic->mac_control.
1568 mc_pause_threshold_q0q3)
1571 writeq(val64, &bar0->mc_pause_thresh_q0q3);
1574 for (i = 0; i < 4; i++) {
1576 (((u64) 0xFF00 | nic->mac_control.
1577 mc_pause_threshold_q4q7)
1580 writeq(val64, &bar0->mc_pause_thresh_q4q7);
1583 * TxDMA will stop Read request if the number of read split has
1584 * exceeded the limit pointed by shared_splits
1586 val64 = readq(&bar0->pic_control);
1587 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1588 writeq(val64, &bar0->pic_control);
1590 if (nic->config.bus_speed == 266) {
1591 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1592 writeq(0x0, &bar0->read_retry_delay);
1593 writeq(0x0, &bar0->write_retry_delay);
1597 * Programming the Herc to split every write transaction
1598 * that does not start on an ADB to reduce disconnects.
1600 if (nic->device_type == XFRAME_II_DEVICE) {
1601 val64 = EXT_REQ_EN | MISC_LINK_STABILITY_PRD(3);
1602 writeq(val64, &bar0->misc_control);
1603 val64 = readq(&bar0->pic_control2);
1604 val64 &= ~(BIT(13)|BIT(14)|BIT(15));
1605 writeq(val64, &bar0->pic_control2);
1607 if (strstr(nic->product_name, "CX4")) {
1608 val64 = TMAC_AVG_IPG(0x17);
1609 writeq(val64, &bar0->tmac_avg_ipg);
/* Link-fault detection modes returned by s2io_link_fault_indication():
 * either the adapter raises an interrupt on link up/down transitions,
 * or the driver falls back to polling via the RMAC error timer.
 */
1614 #define LINK_UP_DOWN_INTERRUPT 1
1615 #define MAC_RMAC_ERR_TIMER 2
1617 static int s2io_link_fault_indication(nic_t *nic)
1619 if (nic->intr_type != INTA)
1620 return MAC_RMAC_ERR_TIMER;
1621 if (nic->device_type == XFRAME_II_DEVICE)
1622 return LINK_UP_DOWN_INTERRUPT;
1624 return MAC_RMAC_ERR_TIMER;
1628 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1629 * @nic: device private variable,
1630 * @mask: A mask indicating which Intr block must be modified and,
1631 * @flag: A flag indicating whether to enable or disable the Intrs.
1632 * Description: This function will either disable or enable the interrupts
1633 * depending on the flag argument. The mask argument can be used to
1634 * enable/disable any Intr block.
1635 * Return Value: NONE.
1638 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1640 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1641 register u64 val64 = 0, temp64 = 0;
/* Common pattern for every interrupt block below: on ENABLE_INTRS, clear
 * the block's bit(s) in general_int_mask (read-modify-write) and then open
 * only the selected sub-interrupts in the per-block mask register; on
 * DISABLE_INTRS, write DISABLE_ALL_INTRS to the per-block mask and re-mask
 * the block at the top level.
 * NOTE(review): this listing appears to have lines elided (some braces and
 * mask-update statements are missing); code is kept byte-for-byte as found.
 */
1643 /* Top level interrupt classification */
1644 /* PIC Interrupts */
1645 if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
1646 /* Enable PIC Intrs in the general intr mask register */
1647 val64 = TXPIC_INT_M | PIC_RX_INT_M;
1648 if (flag == ENABLE_INTRS) {
1649 temp64 = readq(&bar0->general_int_mask);
1650 temp64 &= ~((u64) val64);
1651 writeq(temp64, &bar0->general_int_mask);
1653 * If Hercules adapter enable GPIO otherwise
1654 * disabled all PCIX, Flash, MDIO, IIC and GPIO
1655 * interrupts for now.
/* GPIO (link up/down) interrupts are unmasked only when the adapter
 * reports link faults via interrupt — see s2io_link_fault_indication().
 */
1658 if (s2io_link_fault_indication(nic) ==
1659 LINK_UP_DOWN_INTERRUPT ) {
1660 temp64 = readq(&bar0->pic_int_mask);
1661 temp64 &= ~((u64) PIC_INT_GPIO);
1662 writeq(temp64, &bar0->pic_int_mask);
1663 temp64 = readq(&bar0->gpio_int_mask);
1664 temp64 &= ~((u64) GPIO_INT_MASK_LINK_UP);
1665 writeq(temp64, &bar0->gpio_int_mask);
1667 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1670 * No MSI Support is available presently, so TTI and
1671 * RTI interrupts are also disabled.
1673 } else if (flag == DISABLE_INTRS) {
1675 * Disable PIC Intrs in the general
1676 * intr mask register
1678 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1679 temp64 = readq(&bar0->general_int_mask);
1681 writeq(val64, &bar0->general_int_mask);
1685 /* DMA Interrupts */
1686 /* Enabling/Disabling Tx DMA interrupts */
1687 if (mask & TX_DMA_INTR) {
1688 /* Enable TxDMA Intrs in the general intr mask register */
1689 val64 = TXDMA_INT_M;
1690 if (flag == ENABLE_INTRS) {
1691 temp64 = readq(&bar0->general_int_mask);
1692 temp64 &= ~((u64) val64);
1693 writeq(temp64, &bar0->general_int_mask);
1695 * Keep all interrupts other than PFC interrupt
1696 * and PCC interrupt disabled in DMA level.
1698 val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
1700 writeq(val64, &bar0->txdma_int_mask);
1702 * Enable only the MISC error 1 interrupt in PFC block
1704 val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
1705 writeq(val64, &bar0->pfc_err_mask);
1707 * Enable only the FB_ECC error interrupt in PCC block
1709 val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
1710 writeq(val64, &bar0->pcc_err_mask);
1711 } else if (flag == DISABLE_INTRS) {
1713 * Disable TxDMA Intrs in the general intr mask
1716 writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
1717 writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
1718 temp64 = readq(&bar0->general_int_mask);
1720 writeq(val64, &bar0->general_int_mask);
1724 /* Enabling/Disabling Rx DMA interrupts */
1725 if (mask & RX_DMA_INTR) {
1726 /* Enable RxDMA Intrs in the general intr mask register */
1727 val64 = RXDMA_INT_M;
1728 if (flag == ENABLE_INTRS) {
1729 temp64 = readq(&bar0->general_int_mask);
1730 temp64 &= ~((u64) val64);
1731 writeq(temp64, &bar0->general_int_mask);
1733 * All RxDMA block interrupts are disabled for now
1736 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1737 } else if (flag == DISABLE_INTRS) {
1739 * Disable RxDMA Intrs in the general intr mask
1742 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1743 temp64 = readq(&bar0->general_int_mask);
1745 writeq(val64, &bar0->general_int_mask);
1749 /* MAC Interrupts */
1750 /* Enabling/Disabling MAC interrupts */
1751 if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
1752 val64 = TXMAC_INT_M | RXMAC_INT_M;
1753 if (flag == ENABLE_INTRS) {
1754 temp64 = readq(&bar0->general_int_mask);
1755 temp64 &= ~((u64) val64);
1756 writeq(temp64, &bar0->general_int_mask);
1758 * All MAC block error interrupts are disabled for now
1761 } else if (flag == DISABLE_INTRS) {
1763 * Disable MAC Intrs in the general intr mask register
1765 writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
1766 writeq(DISABLE_ALL_INTRS,
1767 &bar0->mac_rmac_err_mask);
1769 temp64 = readq(&bar0->general_int_mask);
1771 writeq(val64, &bar0->general_int_mask);
1775 /* XGXS Interrupts */
1776 if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
1777 val64 = TXXGXS_INT_M | RXXGXS_INT_M;
1778 if (flag == ENABLE_INTRS) {
1779 temp64 = readq(&bar0->general_int_mask);
1780 temp64 &= ~((u64) val64);
1781 writeq(temp64, &bar0->general_int_mask);
1783 * All XGXS block error interrupts are disabled for now
1786 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1787 } else if (flag == DISABLE_INTRS) {
1789 * Disable MC Intrs in the general intr mask register
1791 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1792 temp64 = readq(&bar0->general_int_mask);
1794 writeq(val64, &bar0->general_int_mask);
1798 /* Memory Controller(MC) interrupts */
1799 if (mask & MC_INTR) {
1801 if (flag == ENABLE_INTRS) {
1802 temp64 = readq(&bar0->general_int_mask);
1803 temp64 &= ~((u64) val64);
1804 writeq(temp64, &bar0->general_int_mask);
1806 * Enable all MC Intrs.
1808 writeq(0x0, &bar0->mc_int_mask);
1809 writeq(0x0, &bar0->mc_err_mask);
1810 } else if (flag == DISABLE_INTRS) {
1812 * Disable MC Intrs in the general intr mask register
1814 writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1815 temp64 = readq(&bar0->general_int_mask);
1817 writeq(val64, &bar0->general_int_mask);
1822 /* Tx traffic interrupts */
1823 if (mask & TX_TRAFFIC_INTR) {
1824 val64 = TXTRAFFIC_INT_M;
1825 if (flag == ENABLE_INTRS) {
1826 temp64 = readq(&bar0->general_int_mask);
1827 temp64 &= ~((u64) val64);
1828 writeq(temp64, &bar0->general_int_mask);
1830 * Enable all the Tx side interrupts
1831 * writing 0 Enables all 64 TX interrupt levels
1833 writeq(0x0, &bar0->tx_traffic_mask);
1834 } else if (flag == DISABLE_INTRS) {
1836 * Disable Tx Traffic Intrs in the general intr mask
1839 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1840 temp64 = readq(&bar0->general_int_mask);
1842 writeq(val64, &bar0->general_int_mask);
1846 /* Rx traffic interrupts */
1847 if (mask & RX_TRAFFIC_INTR) {
1848 val64 = RXTRAFFIC_INT_M;
1849 if (flag == ENABLE_INTRS) {
1850 temp64 = readq(&bar0->general_int_mask);
1851 temp64 &= ~((u64) val64);
1852 writeq(temp64, &bar0->general_int_mask);
1853 /* writing 0 Enables all 8 RX interrupt levels */
1854 writeq(0x0, &bar0->rx_traffic_mask);
1855 } else if (flag == DISABLE_INTRS) {
1857 * Disable Rx Traffic Intrs in the general intr mask
1860 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1861 temp64 = readq(&bar0->general_int_mask);
1863 writeq(val64, &bar0->general_int_mask);
/* check_prc_pcc_state - check PRC/PCC quiescence in the adapter status word.
 * @val64: value read from the adapter status register.
 * @flag: FALSE before the adapter-enable bit has ever been written; the
 *        PCC-idle comparison is inverted depending on this.
 * @rev_id: Xframe I silicon revision (the RMAC_PCC_IDLE field semantics
 *          changed at rev 4).
 * @herc: non-zero for Xframe II (Hercules) adapters, which always use the
 *        rev-4+ style check.
 * Helper for verify_xena_quiescence().
 * NOTE(review): listing is elided here (return statements / braces are
 * missing); code kept byte-for-byte as found.
 */
1868 static int check_prc_pcc_state(u64 val64, int flag, int rev_id, int herc)
1872 if (flag == FALSE) {
1873 if ((!herc && (rev_id >= 4)) || herc) {
1874 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1875 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1876 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
/* Older Xframe I silicon (rev < 4) exposes a four-bit PCC idle field. */
1880 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1881 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1882 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1887 if ((!herc && (rev_id >= 4)) || herc) {
1888 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1889 ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1890 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1891 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1892 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1896 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1897 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1898 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1899 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1900 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1909 * verify_xena_quiescence - Checks whether the H/W is ready
1910 * @val64 : Value read from adapter status register.
1911 * @flag : indicates if the adapter enable bit was ever written once
1913 * Description: Returns whether the H/W is ready to go or not. Depending
1914 * on whether adapter enable bit was written or not the comparison
1915 * differs and the calling function passes the input argument flag to
1917 * Return: 1 If xena is quiescence
1918 * 0 If Xena is not quiescence
/* Checks that every READY/QUIESCENT bit in the adapter status word is set
 * (the complemented value masked against the required bits must be zero),
 * then defers the revision-dependent PRC/PCC check to check_prc_pcc_state().
 */
1921 static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag)
/* tmp64 holds the complement so "all required bits set" becomes
 * "tmp64 & required == 0" (elided condition in this listing).
 */
1924 u64 tmp64 = ~((u64) val64);
1925 int rev_id = get_xena_rev_id(sp->pdev);
1927 herc = (sp->device_type == XFRAME_II_DEVICE);
1930 (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
1931 ADAPTER_STATUS_PFC_READY | ADAPTER_STATUS_TMAC_BUF_EMPTY |
1932 ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
1933 ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
1934 ADAPTER_STATUS_P_PLL_LOCK))) {
1935 ret = check_prc_pcc_state(val64, flag, rev_id, herc);
1942 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
1943 * @sp: Pointer to device specifc structure
1945 * New procedure to clear mac address reading problems on Alpha platforms
/* Replays the fix_mac[] workaround sequence (terminated by END_SIGN) into
 * the GPIO control register; each write is followed by a read-back, which
 * presumably flushes the posted write — TODO confirm against the HW spec.
 */
1949 static void fix_mac_address(nic_t * sp)
1951 XENA_dev_config_t __iomem *bar0 = sp->bar0;
1955 while (fix_mac[i] != END_SIGN) {
1956 writeq(fix_mac[i++], &bar0->gpio_control);
1958 val64 = readq(&bar0->gpio_control);
1963 * start_nic - Turns the device on
1964 * @nic : device private variable.
1966 * This function actually turns the device on. Before this function is
1967 * called,all Registers are configured from their reset states
1968 * and shared memory is allocated but the NIC is still quiescent. On
1969 * calling this function, the device interrupts are cleared and the NIC is
1970 * literally switched on by writing into the adapter control register.
1972 * SUCCESS on success and -1 on failure.
/* NOTE(review): listing is elided here (missing braces/return paths);
 * code kept byte-for-byte as found.
 */
1975 static int start_nic(struct s2io_nic *nic)
1977 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1978 struct net_device *dev = nic->dev;
1979 register u64 val64 = 0;
1982 mac_info_t *mac_control;
1983 struct config_param *config;
1985 mac_control = &nic->mac_control;
1986 config = &nic->config;
/* Per-ring PRC setup: point each PRC at the first Rx block's DMA address
 * and program the ring/buffer mode and RxD backoff interval.
 */
1988 /* PRC Initialization and configuration */
1989 for (i = 0; i < config->rx_ring_num; i++) {
1990 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
1991 &bar0->prc_rxd0_n[i]);
1993 val64 = readq(&bar0->prc_ctrl_n[i]);
1994 if (nic->config.bimodal)
1995 val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
1996 if (nic->rxd_mode == RXD_MODE_1)
1997 val64 |= PRC_CTRL_RC_ENABLED;
1999 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
2000 if (nic->device_type == XFRAME_II_DEVICE)
2001 val64 |= PRC_CTRL_GROUP_READS;
2002 val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
2003 val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
2004 writeq(val64, &bar0->prc_ctrl_n[i]);
2007 if (nic->rxd_mode == RXD_MODE_3B) {
2008 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
2009 val64 = readq(&bar0->rx_pa_cfg);
2010 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
2011 writeq(val64, &bar0->rx_pa_cfg);
2015 * Enabling MC-RLDRAM. After enabling the device, we timeout
2016 * for around 100ms, which is approximately the time required
2017 * for the device to be ready for operation.
2019 val64 = readq(&bar0->mc_rldram_mrs);
2020 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
2021 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
2022 val64 = readq(&bar0->mc_rldram_mrs);
2024 msleep(100); /* Delay by around 100 ms. */
/* NOTE(review): despite the comment, this clears ADAPTER_ECC_EN — looks
 * like it disables ECC rather than enabling it; confirm intent.
 */
2026 /* Enabling ECC Protection. */
2027 val64 = readq(&bar0->adapter_control);
2028 val64 &= ~ADAPTER_ECC_EN;
2029 writeq(val64, &bar0->adapter_control);
2032 * Clearing any possible Link state change interrupts that
2033 * could have popped up just before Enabling the card.
/* Write-1-to-clear: the value read back is written straight out. */
2035 val64 = readq(&bar0->mac_rmac_err_reg);
2037 writeq(val64, &bar0->mac_rmac_err_reg);
2040 * Verify if the device is ready to be enabled, if so enable
2043 val64 = readq(&bar0->adapter_status);
2044 if (!verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
2045 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
2046 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
2047 (unsigned long long) val64);
/* For MSI/MSI-X, mask everything first, then enable only the selected
 * traffic/PIC/MAC interrupt groups.
 */
2051 /* Enable select interrupts */
2052 if (nic->intr_type != INTA)
2053 en_dis_able_nic_intrs(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2055 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2056 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
2057 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
2058 en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
2062 * With some switches, link might be already up at this point.
2063 * Because of this weird behavior, when we enable laser,
2064 * we may not get link. We need to handle this. We cannot
2065 * figure out which switch is misbehaving. So we are forced to
2066 * make a global change.
2069 /* Enabling Laser. */
2070 val64 = readq(&bar0->adapter_control);
2071 val64 |= ADAPTER_EOI_TX_ON;
2072 writeq(val64, &bar0->adapter_control);
2074 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2076 * Dont see link state interrupts initally on some switches,
2077 * so directly scheduling the link state task here.
2079 schedule_work(&nic->set_link_task);
2081 /* SXE-002: Initialize link and activity LED */
2082 subid = nic->pdev->subsystem_device;
2083 if (((subid & 0xFF) >= 0x07) &&
2084 (nic->device_type == XFRAME_I_DEVICE)) {
2085 val64 = readq(&bar0->gpio_control);
2086 val64 |= 0x0000800000000000ULL;
2087 writeq(val64, &bar0->gpio_control);
2088 val64 = 0x0411040400000000ULL;
2089 writeq(val64, (void __iomem *)bar0 + 0x2700);
2095 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
/* Walks a Tx descriptor list: unmaps the linear part and every fragment
 * page with the PCI DMA API, recovers the skb pointer stashed in
 * Host_Control, zeroes the descriptors, and returns the skb so the caller
 * can free it. The UFO in-band buffer (nic->ufo_in_band_v) is a special
 * first descriptor that carries no skb and is only unmapped.
 * NOTE(review): listing is elided (missing braces / early-return lines);
 * code kept byte-for-byte as found.
 */
2097 static struct sk_buff *s2io_txdl_getskb(fifo_info_t *fifo_data, TxD_t *txdlp, int get_off)
2099 nic_t *nic = fifo_data->nic;
2100 struct sk_buff *skb;
2105 if (txds->Host_Control == (u64)(long)nic->ufo_in_band_v) {
2106 pci_unmap_single(nic->pdev, (dma_addr_t)
2107 txds->Buffer_Pointer, sizeof(u64),
/* Host_Control holds the skb pointer cast to an integer at xmit time. */
2112 skb = (struct sk_buff *) ((unsigned long)
2113 txds->Host_Control);
2115 memset(txdlp, 0, (sizeof(TxD_t) * fifo_data->max_txds));
/* Linear data length is len - data_len (the paged part is unmapped below). */
2118 pci_unmap_single(nic->pdev, (dma_addr_t)
2119 txds->Buffer_Pointer,
2120 skb->len - skb->data_len,
2122 frg_cnt = skb_shinfo(skb)->nr_frags;
2125 for (j = 0; j < frg_cnt; j++, txds++) {
2126 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2127 if (!txds->Buffer_Pointer)
2129 pci_unmap_page(nic->pdev, (dma_addr_t)
2130 txds->Buffer_Pointer,
2131 frag->size, PCI_DMA_TODEVICE);
2134 txdlp->Host_Control = 0;
2139 * free_tx_buffers - Free all queued Tx buffers
2140 * @nic : device private variable.
2142 * Free all queued Tx buffers.
2143 * Return Value: void
/* Called on teardown: forcibly reclaims every skb still attached to any
 * Tx descriptor in every FIFO (via s2io_txdl_getskb) and resets the
 * get/put offsets so the FIFOs start empty.
 */
2146 static void free_tx_buffers(struct s2io_nic *nic)
2148 struct net_device *dev = nic->dev;
2149 struct sk_buff *skb;
2152 mac_info_t *mac_control;
2153 struct config_param *config;
2156 mac_control = &nic->mac_control;
2157 config = &nic->config;
2159 for (i = 0; i < config->tx_fifo_num; i++) {
2160 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
2161 txdp = (TxD_t *) mac_control->fifos[i].list_info[j].
2163 skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2170 "%s:forcibly freeing %d skbs on FIFO%d\n",
2172 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2173 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2178 * stop_nic - To stop the nic
2179 * @nic ; device private variable.
2181 * This function does exactly the opposite of what the start_nic()
2182 * function does. This function is called to stop the device.
/* Masks the same interrupt groups start_nic() enabled, then clears the
 * adapter-enable bit in the adapter control register to quiesce the NIC.
 */
2187 static void stop_nic(struct s2io_nic *nic)
2189 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2190 register u64 val64 = 0;
2192 mac_info_t *mac_control;
2193 struct config_param *config;
2195 mac_control = &nic->mac_control;
2196 config = &nic->config;
2198 /* Disable all interrupts */
2199 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2200 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
2201 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
2202 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2204 /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2205 val64 = readq(&bar0->adapter_control);
2206 val64 &= ~(ADAPTER_CNTL_EN);
2207 writeq(val64, &bar0->adapter_control);
/* fill_rxd_3buf - populate Buffer-1/Buffer-2 of a 3-buffer-mode RxD.
 * @nic: device private structure.
 * @rxdp: descriptor to fill (treated as RxD3_t).
 * @skb: head skb whose data area will receive the L3/L4 headers.
 *
 * Allocates a second skb (hung off skb_shinfo(skb)->frag_list) for the L4
 * payload, aligns its data pointer, DMA-maps both buffers and programs
 * their sizes into Control_2. Returns an error (-ENOMEM path visible at
 * the caller) when the frag_list allocation fails.
 * NOTE(review): listing is elided (return statements missing); code kept
 * byte-for-byte as found.
 */
2210 static int fill_rxd_3buf(nic_t *nic, RxD_t *rxdp, struct sk_buff *skb)
2212 struct net_device *dev = nic->dev;
2213 struct sk_buff *frag_list;
2216 /* Buffer-1 receives L3/L4 headers */
2217 ((RxD3_t*)rxdp)->Buffer1_ptr = pci_map_single
2218 (nic->pdev, skb->data, l3l4hdr_size + 4,
2219 PCI_DMA_FROMDEVICE);
2221 /* skb_shinfo(skb)->frag_list will have L4 data payload */
2222 skb_shinfo(skb)->frag_list = dev_alloc_skb(dev->mtu + ALIGN_SIZE);
2223 if (skb_shinfo(skb)->frag_list == NULL) {
2224 DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n ", dev->name);
2227 frag_list = skb_shinfo(skb)->frag_list;
2228 frag_list->next = NULL;
/* Align payload start; ALIGN_SIZE + 1 is presumably a power of two — TODO confirm. */
2229 tmp = (void *)ALIGN((long)frag_list->data, ALIGN_SIZE + 1);
2230 frag_list->data = tmp;
2231 frag_list->tail = tmp;
2233 /* Buffer-2 receives L4 data payload */
2234 ((RxD3_t*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev,
2235 frag_list->data, dev->mtu,
2236 PCI_DMA_FROMDEVICE);
2237 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
2238 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);
2244 * fill_rx_buffers - Allocates the Rx side skbs
2245 * @nic: device private variable
2246 * @ring_no: ring number
2248 * The function allocates Rx side skbs and puts the physical
2249 * address of these buffers into the RxD buffer pointers, so that the NIC
2250 * can DMA the received frame into these locations.
2251 * The NIC supports 3 receive modes, viz
2253 * 2. three buffer and
2254 * 3. Five buffer modes.
2255 * Each mode defines how many fragments the received frame will be split
2256 * up into by the NIC. The frame is split into L3 header, L4 Header,
2257 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2258 * is split into 3 fragments. As of now only single buffer mode is
2261 * SUCCESS on success or an appropriate -ve value on failure.
/* NOTE(review): listing is elided throughout this function (missing
 * braces, break/return lines); code kept byte-for-byte as found.
 */
2264 static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2266 struct net_device *dev = nic->dev;
2267 struct sk_buff *skb;
2269 int off, off1, size, block_no, block_no1;
2272 mac_info_t *mac_control;
2273 struct config_param *config;
2276 #ifndef CONFIG_S2IO_NAPI
2277 unsigned long flags;
2279 RxD_t *first_rxdp = NULL;
2281 mac_control = &nic->mac_control;
2282 config = &nic->config;
/* Replenish up to pkt_cnt minus the buffers the adapter still owns. */
2283 alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
2284 atomic_read(&nic->rx_bufs_left[ring_no]);
2286 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
2287 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
2288 while (alloc_tab < alloc_cnt) {
2289 block_no = mac_control->rings[ring_no].rx_curr_put_info.
2291 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
2293 rxdp = mac_control->rings[ring_no].
2294 rx_blocks[block_no].rxds[off].virt_addr;
/* Stop when the put pointer catches the get pointer on an in-use RxD —
 * the ring is full.
 */
2296 if ((block_no == block_no1) && (off == off1) &&
2297 (rxdp->Host_Control)) {
2298 DBG_PRINT(INTR_DBG, "%s: Get and Put",
2300 DBG_PRINT(INTR_DBG, " info equated\n");
/* End of a block: advance (and wrap) the put block index. */
2303 if (off && (off == rxd_count[nic->rxd_mode])) {
2304 mac_control->rings[ring_no].rx_curr_put_info.
2306 if (mac_control->rings[ring_no].rx_curr_put_info.
2307 block_index == mac_control->rings[ring_no].
2309 mac_control->rings[ring_no].rx_curr_put_info.
2311 block_no = mac_control->rings[ring_no].
2312 rx_curr_put_info.block_index;
2313 if (off == rxd_count[nic->rxd_mode])
2315 mac_control->rings[ring_no].rx_curr_put_info.
2317 rxdp = mac_control->rings[ring_no].
2318 rx_blocks[block_no].block_virt_addr;
2319 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
/* Non-NAPI: publish the put position under put_lock for the ISR. */
2322 #ifndef CONFIG_S2IO_NAPI
2323 spin_lock_irqsave(&nic->put_lock, flags);
2324 mac_control->rings[ring_no].put_pos =
2325 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2326 spin_unlock_irqrestore(&nic->put_lock, flags);
/* Skip descriptors still owned by the adapter (OWN_XENA + marker bit). */
2328 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2329 ((nic->rxd_mode >= RXD_MODE_3A) &&
2330 (rxdp->Control_2 & BIT(0)))) {
2331 mac_control->rings[ring_no].rx_curr_put_info.
2335 /* calculate size of skb based on ring mode */
2336 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2337 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2338 if (nic->rxd_mode == RXD_MODE_1)
2339 size += NET_IP_ALIGN;
2340 else if (nic->rxd_mode == RXD_MODE_3B)
2341 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2343 size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;
2346 skb = dev_alloc_skb(size);
/* Out of memory: hand any batched descriptors to the adapter and bail. */
2348 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
2349 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
2352 first_rxdp->Control_1 |= RXD_OWN_XENA;
2356 if (nic->rxd_mode == RXD_MODE_1) {
2357 /* 1 buffer mode - normal operation mode */
2358 memset(rxdp, 0, sizeof(RxD1_t));
2359 skb_reserve(skb, NET_IP_ALIGN);
2360 ((RxD1_t*)rxdp)->Buffer0_ptr = pci_map_single
2361 (nic->pdev, skb->data, size - NET_IP_ALIGN,
2362 PCI_DMA_FROMDEVICE);
2363 rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
2365 } else if (nic->rxd_mode >= RXD_MODE_3A) {
2367 * 2 or 3 buffer mode -
2368 * Both 2 buffer mode and 3 buffer mode provides 128
2369 * byte aligned receive buffers.
2371 * 3 buffer mode provides header separation where in
2372 * skb->data will have L3/L4 headers where as
2373 * skb_shinfo(skb)->frag_list will have the L4 data
2377 memset(rxdp, 0, sizeof(RxD3_t));
2378 ba = &mac_control->rings[ring_no].ba[block_no][off];
2379 skb_reserve(skb, BUF0_LEN);
2380 tmp = (u64)(unsigned long) skb->data;
2383 skb->data = (void *) (unsigned long)tmp;
2384 skb->tail = (void *) (unsigned long)tmp;
2386 ((RxD3_t*)rxdp)->Buffer0_ptr =
2387 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2388 PCI_DMA_FROMDEVICE);
2389 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2390 if (nic->rxd_mode == RXD_MODE_3B) {
2391 /* Two buffer mode */
2394 * Buffer2 will have L3/L4 header plus
2397 ((RxD3_t*)rxdp)->Buffer2_ptr = pci_map_single
2398 (nic->pdev, skb->data, dev->mtu + 4,
2399 PCI_DMA_FROMDEVICE);
2401 /* Buffer-1 will be dummy buffer not used */
2402 ((RxD3_t*)rxdp)->Buffer1_ptr =
2403 pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
2404 PCI_DMA_FROMDEVICE);
2405 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2406 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
/* 3-buffer mode: delegate Buffer1/Buffer2 setup to fill_rxd_3buf(). */
2410 if (fill_rxd_3buf(nic, rxdp, skb) == -ENOMEM) {
2411 dev_kfree_skb_irq(skb);
2414 first_rxdp->Control_1 |=
2420 rxdp->Control_2 |= BIT(0);
2422 rxdp->Host_Control = (unsigned long) (skb);
/* Ownership of the first RxD in a batch is deferred (see bottom) so the
 * adapter sees a fully initialized batch; others are handed over here.
 */
2423 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2424 rxdp->Control_1 |= RXD_OWN_XENA;
2426 if (off == (rxd_count[nic->rxd_mode] + 1))
2428 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
2430 rxdp->Control_2 |= SET_RXD_MARKER;
2431 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2434 first_rxdp->Control_1 |= RXD_OWN_XENA;
2438 atomic_inc(&nic->rx_bufs_left[ring_no]);
2443 /* Transfer ownership of first descriptor to adapter just before
2444 * exiting. Before that, use memory barrier so that ownership
2445 * and other fields are seen by adapter correctly.
2449 first_rxdp->Control_1 |= RXD_OWN_XENA;
/*
 * free_rxd_blk - Unmap the DMA buffers and release the skb attached to
 * every RxD of one receive block, then clear the descriptors.
 * @sp: device private structure.
 * @ring_no: receive ring that owns the block.
 * @blk: index of the block within the ring to clean.
 *
 * NOTE(review): this excerpt elides several original source lines (some
 * branch headers, buffer lengths and closing braces); the comments below
 * describe only the code that is visible here.
 */
2455 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2457 	struct net_device *dev = sp->dev;
2459 	struct sk_buff *skb;
2461 	mac_info_t *mac_control;
2464 	mac_control = &sp->mac_control;
	/* Walk every descriptor in the block; Host_Control holds the skb. */
2465 	for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2466 		rxdp = mac_control->rings[ring_no].
2467 			rx_blocks[blk].rxds[j].virt_addr;
2468 		skb = (struct sk_buff *)
2469 			((unsigned long) rxdp->Host_Control);
		/* 1-buffer mode: one data buffer sized for MTU plus L2
		 * headers (exact length expression partly elided here). */
2473 		if (sp->rxd_mode == RXD_MODE_1) {
2474 			pci_unmap_single(sp->pdev, (dma_addr_t)
2475 				 ((RxD1_t*)rxdp)->Buffer0_ptr,
2477 				 HEADER_ETHERNET_II_802_3_SIZE
2478 				 + HEADER_802_2_SIZE +
2480 				 PCI_DMA_FROMDEVICE);
2481 			memset(rxdp, 0, sizeof(RxD1_t));
		/* 2-buffer (3B) mode: unmap all three buffer pointers of
		 * the RxD3 descriptor (Buffer1 is the dummy buffer). */
2482 		} else if(sp->rxd_mode == RXD_MODE_3B) {
2483 			ba = &mac_control->rings[ring_no].
2485 			pci_unmap_single(sp->pdev, (dma_addr_t)
2486 				 ((RxD3_t*)rxdp)->Buffer0_ptr,
2488 				 PCI_DMA_FROMDEVICE);
2489 			pci_unmap_single(sp->pdev, (dma_addr_t)
2490 				 ((RxD3_t*)rxdp)->Buffer1_ptr,
2492 				 PCI_DMA_FROMDEVICE);
2493 			pci_unmap_single(sp->pdev, (dma_addr_t)
2494 				 ((RxD3_t*)rxdp)->Buffer2_ptr,
2496 				 PCI_DMA_FROMDEVICE);
2497 			memset(rxdp, 0, sizeof(RxD3_t));
		/* Remaining mode (presumably 3-buffer header-separation
		 * mode — its branch header is elided): Buffer0 holds the
		 * BUF0_LEN header area, Buffer2 holds up to dev->mtu. */
2499 			pci_unmap_single(sp->pdev, (dma_addr_t)
2500 				((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN,
2501 				PCI_DMA_FROMDEVICE);
2502 			pci_unmap_single(sp->pdev, (dma_addr_t)
2503 				((RxD3_t*)rxdp)->Buffer1_ptr,
2505 				PCI_DMA_FROMDEVICE);
2506 			pci_unmap_single(sp->pdev, (dma_addr_t)
2507 				((RxD3_t*)rxdp)->Buffer2_ptr, dev->mtu,
2508 				PCI_DMA_FROMDEVICE);
2509 			memset(rxdp, 0, sizeof(RxD3_t));
		/* One buffer returned to the pool per freed descriptor. */
2512 		atomic_dec(&sp->rx_bufs_left[ring_no]);
2517 *  free_rx_buffers - Frees all Rx buffers
2518 *  @sp: device private variable.
2520 *  This function will free all Rx buffers allocated by host.
2525 static void free_rx_buffers(struct s2io_nic *sp)
2527 	struct net_device *dev = sp->dev;
2528 	int i, blk = 0, buf_cnt = 0;
2529 	mac_info_t *mac_control;
2530 	struct config_param *config;
2532 	mac_control = &sp->mac_control;
2533 	config = &sp->config;
	/* For each configured ring, free every block, then reset the
	 * get/put bookkeeping so the ring starts empty at offset 0. */
2535 	for (i = 0; i < config->rx_ring_num; i++) {
2536 		for (blk = 0; blk < rx_ring_sz[i]; blk++)
2537 			free_rxd_blk(sp,i,blk);
2539 		mac_control->rings[i].rx_curr_put_info.block_index = 0;
2540 		mac_control->rings[i].rx_curr_get_info.block_index = 0;
2541 		mac_control->rings[i].rx_curr_put_info.offset = 0;
2542 		mac_control->rings[i].rx_curr_get_info.offset = 0;
2543 		atomic_set(&sp->rx_bufs_left[i], 0);
		/* NOTE(review): buf_cnt is never incremented in the visible
		 * code, so this prints 0 unless an elided line updates it. */
2544 		DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2545 			  dev->name, buf_cnt, i);
2550 * s2io_poll - Rx interrupt handler for NAPI support
2551 * @dev : pointer to the device structure.
2552 * @budget : The number of packets that were budgeted to be processed
2553 * during  one pass through the 'Poll" function.
2555 * Comes into picture only if NAPI support has been incorporated. It does
2556 * the same thing that rx_intr_handler does, but not in an interrupt context;
2557 * it also processes only a given number of packets.
2559 * 0 on success and 1 if there are No Rx packets to be processed.
2562 #if defined(CONFIG_S2IO_NAPI)
2563 static int s2io_poll(struct net_device *dev, int *budget)
2565 	nic_t *nic = dev->priv;
2566 	int pkt_cnt = 0, org_pkts_to_process;
2567 	mac_info_t *mac_control;
2568 	struct config_param *config;
2569 	XENA_dev_config_t __iomem *bar0 = nic->bar0;
2570 	u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2573 	atomic_inc(&nic->isr_cnt);
2574 	mac_control = &nic->mac_control;
2575 	config = &nic->config;
	/* Clamp this pass's quota to both the caller's budget and the
	 * device quota. */
2577 	nic->pkts_to_process = *budget;
2578 	if (nic->pkts_to_process > dev->quota)
2579 		nic->pkts_to_process = dev->quota;
2580 	org_pkts_to_process = nic->pkts_to_process;
	/* Ack pending Rx interrupts; the readl flushes the posted write. */
2582 	writeq(val64, &bar0->rx_traffic_int);
2583 	val64 = readl(&bar0->rx_traffic_int);
2585 	for (i = 0; i < config->rx_ring_num; i++) {
2586 		rx_intr_handler(&mac_control->rings[i]);
2587 		pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2588 		if (!nic->pkts_to_process) {
2589 			/* Quota for the current iteration has been met */
	/* Budget not exhausted: complete polling and replenish buffers. */
2596 	dev->quota -= pkt_cnt;
2598 	netif_rx_complete(dev);
2600 	for (i = 0; i < config->rx_ring_num; i++) {
2601 		if (fill_rx_buffers(nic, i) == -ENOMEM) {
2602 			DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2603 			DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2607 	/* Re enable the Rx interrupts. */
2608 	writeq(0x0, &bar0->rx_traffic_mask);
2609 	val64 = readl(&bar0->rx_traffic_mask);
2610 	atomic_dec(&nic->isr_cnt);
	/* Quota-exhausted path: stay on the poll list (interrupts remain
	 * masked) but still top up the rings. */
2614 	dev->quota -= pkt_cnt;
2617 	for (i = 0; i < config->rx_ring_num; i++) {
2618 		if (fill_rx_buffers(nic, i) == -ENOMEM) {
2619 			DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2620 			DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2624 	atomic_dec(&nic->isr_cnt);
2630 *  rx_intr_handler - Rx interrupt handler
2631 *  @nic: device private variable.
2633 *  If the interrupt is because of a received frame or if the
2634 *  receive ring contains fresh as yet un-processed frames,this function is
2635 *  called. It picks out the RxD at which place the last Rx processing had
2636 *  stopped and sends the skb to the OSM's Rx handler and then increments
2641 static void rx_intr_handler(ring_info_t *ring_data)
2643 	nic_t *nic = ring_data->nic;
2644 	struct net_device *dev = (struct net_device *) nic->dev;
2645 	int get_block, put_block, put_offset;
2646 	rx_curr_get_info_t get_info, put_info;
2648 	struct sk_buff *skb;
2649 #ifndef CONFIG_S2IO_NAPI
	/* Whole receive path is serialized against reset via rx_lock. */
2654 	spin_lock(&nic->rx_lock);
2655 	if (atomic_read(&nic->card_state) == CARD_DOWN) {
2656 		DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n",
2657 			  __FUNCTION__, dev->name);
2658 		spin_unlock(&nic->rx_lock);
	/* Snapshot get/put positions; descriptors between them are
	 * candidates for processing. */
2662 	get_info = ring_data->rx_curr_get_info;
2663 	get_block = get_info.block_index;
2664 	put_info = ring_data->rx_curr_put_info;
2665 	put_block = put_info.block_index;
2666 	rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
2667 #ifndef CONFIG_S2IO_NAPI
	/* Non-NAPI: put_pos is shared with fill_rx_buffers, so read it
	 * under put_lock. */
2668 	spin_lock(&nic->put_lock);
2669 	put_offset = ring_data->put_pos;
2670 	spin_unlock(&nic->put_lock);
2672 	put_offset = (put_block * (rxd_count[nic->rxd_mode] + 1)) +
	/* Process descriptors until one is still owned by the adapter. */
2675 	while (RXD_IS_UP2DT(rxdp)) {
2676 		/* If we are next to the put index, the ring is full. */
2677 		if ((get_block == put_block) &&
2678 		    (get_info.offset + 1) == put_info.offset) {
2679 			DBG_PRINT(ERR_DBG, "%s: Ring Full\n",dev->name);
2682 		skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2684 			DBG_PRINT(ERR_DBG, "%s: The skb is ",
2686 			DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2687 			spin_unlock(&nic->rx_lock);
		/* Unmap the descriptor's buffers according to the Rx mode
		 * (some length arguments are elided in this excerpt). */
2690 		if (nic->rxd_mode == RXD_MODE_1) {
2691 			pci_unmap_single(nic->pdev, (dma_addr_t)
2692 				 ((RxD1_t*)rxdp)->Buffer0_ptr,
2694 				 HEADER_ETHERNET_II_802_3_SIZE +
2697 				 PCI_DMA_FROMDEVICE);
2698 		} else if (nic->rxd_mode == RXD_MODE_3B) {
2699 			pci_unmap_single(nic->pdev, (dma_addr_t)
2700 				 ((RxD3_t*)rxdp)->Buffer0_ptr,
2701 				 BUF0_LEN, PCI_DMA_FROMDEVICE);
2702 			pci_unmap_single(nic->pdev, (dma_addr_t)
2703 				 ((RxD3_t*)rxdp)->Buffer1_ptr,
2704 				 BUF1_LEN, PCI_DMA_FROMDEVICE);
2705 			pci_unmap_single(nic->pdev, (dma_addr_t)
2706 				 ((RxD3_t*)rxdp)->Buffer2_ptr,
2708 				 PCI_DMA_FROMDEVICE);
2710 			pci_unmap_single(nic->pdev, (dma_addr_t)
2711 				 ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN,
2712 				 PCI_DMA_FROMDEVICE);
2713 			pci_unmap_single(nic->pdev, (dma_addr_t)
2714 				 ((RxD3_t*)rxdp)->Buffer1_ptr,
2716 				 PCI_DMA_FROMDEVICE);
2717 			pci_unmap_single(nic->pdev, (dma_addr_t)
2718 				 ((RxD3_t*)rxdp)->Buffer2_ptr,
2719 				 dev->mtu, PCI_DMA_FROMDEVICE);
		/* Hand the frame to the OSM layer and advance the get
		 * pointer, wrapping at block and ring boundaries. */
2721 		prefetch(skb->data);
2722 		rx_osm_handler(ring_data, rxdp);
2724 		ring_data->rx_curr_get_info.offset = get_info.offset;
2725 		rxdp = ring_data->rx_blocks[get_block].
2726 				rxds[get_info.offset].virt_addr;
2727 		if (get_info.offset == rxd_count[nic->rxd_mode]) {
2728 			get_info.offset = 0;
2729 			ring_data->rx_curr_get_info.offset = get_info.offset;
2731 			if (get_block == ring_data->block_count)
2733 			ring_data->rx_curr_get_info.block_index = get_block;
2734 			rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2737 #ifdef CONFIG_S2IO_NAPI
		/* NAPI: stop once this pass's packet quota is consumed. */
2738 		nic->pkts_to_process -= 1;
2739 		if (!nic->pkts_to_process)
		/* Non-NAPI: honor the indicate_max_pkts module limit. */
2743 		if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2748 	/* Clear all LRO sessions before exiting */
2749 	for (i=0; i<MAX_LRO_SESSIONS; i++) {
2750 		lro_t *lro = &nic->lro0_n[i];
2752 			update_L3L4_header(nic, lro);
2753 			queue_rx_frame(lro->parent);
2754 			clear_lro_session(lro);
2759 	spin_unlock(&nic->rx_lock);
2763 *  tx_intr_handler - Transmit interrupt handler
2764 *  @nic : device private variable
2766 *  If an interrupt was raised to indicate DMA complete of the
2767 *  Tx packet, this function is called. It identifies the last TxD
2768 *  whose buffer was freed and frees all skbs whose data have already
2769 *  DMA'ed into the NICs internal memory.
2774 static void tx_intr_handler(fifo_info_t *fifo_data)
2776 	nic_t *nic = fifo_data->nic;
2777 	struct net_device *dev = (struct net_device *) nic->dev;
2778 	tx_curr_get_info_t get_info, put_info;
2779 	struct sk_buff *skb;
2782 	get_info = fifo_data->tx_curr_get_info;
2783 	put_info = fifo_data->tx_curr_put_info;
2784 	txdlp = (TxD_t *) fifo_data->list_info[get_info.offset].
	/* Reclaim completed TxDs: stop at a descriptor the NIC still
	 * owns, at the put index, or at one with no skb attached. */
2786 	while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2787 	       (get_info.offset != put_info.offset) &&
2788 	       (txdlp->Host_Control)) {
2789 		/* Check for TxD errors */
2790 		if (txdlp->Control_1 & TXD_T_CODE) {
2791 			unsigned long long err;
2792 			err = txdlp->Control_1 & TXD_T_CODE;
2794 				nic->mac_control.stats_info->sw_stat.
			/* T_CODE 0xA means the frame was dropped because
			 * the link went down — debug-level only. */
2797 			if ((err >> 48) == 0xA) {
2798 				DBG_PRINT(TX_DBG, "TxD returned due \
2799 to loss of link\n");
2802 				DBG_PRINT(ERR_DBG, "***TxD error \
		/* Unmap the TxDL's buffers and retrieve the skb. */
2807 		skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
2809 			DBG_PRINT(ERR_DBG, "%s: Null skb ",
2811 			DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2815 		/* Updating the statistics block */
2816 		nic->stats.tx_bytes += skb->len;
2817 		dev_kfree_skb_irq(skb);
		/* Advance the get index, wrapping at the FIFO length. */
2820 		if (get_info.offset == get_info.fifo_len + 1)
2821 			get_info.offset = 0;
2822 		txdlp = (TxD_t *) fifo_data->list_info
2823 		    [get_info.offset].list_virt_addr;
2824 		fifo_data->tx_curr_get_info.offset =
	/* Descriptors were freed, so the queue may be restarted. */
2828 	spin_lock(&nic->tx_lock);
2829 	if (netif_queue_stopped(dev))
2830 		netif_wake_queue(dev);
2831 	spin_unlock(&nic->tx_lock);
2835 *  s2io_mdio_write - Function to write in to MDIO registers
2836 *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
2837 *  @addr     : address value
2838 *  @value    : data value
2839 *  @dev      : pointer to net_device structure
2841 *  This function is used to write values to the MDIO registers
2844 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
2847 	nic_t *sp = dev->priv;
2848 	XENA_dev_config_t *bar0 = (XENA_dev_config_t *)sp->bar0;
2850 	/* Address transaction: latch the register address, then start
	 * the transaction by setting START_TRANS. */
2851 	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2852 			| MDIO_MMD_DEV_ADDR(mmd_type)
2853 			| MDIO_MMS_PRT_ADDR(0x0);
2854 	writeq(val64, &bar0->mdio_control);
2855 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2856 	writeq(val64, &bar0->mdio_control);
	/* Data (write) transaction carrying the 16-bit value. */
2861 	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2862 			| MDIO_MMD_DEV_ADDR(mmd_type)
2863 			| MDIO_MMS_PRT_ADDR(0x0)
2864 			| MDIO_MDIO_DATA(value)
2865 			| MDIO_OP(MDIO_OP_WRITE_TRANS);
2866 	writeq(val64, &bar0->mdio_control);
2867 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2868 	writeq(val64, &bar0->mdio_control);
	/* Read-back transaction (result not returned; presumably done
	 * to verify/complete the write — confirm against full source). */
2872 	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2873 			| MDIO_MMD_DEV_ADDR(mmd_type)
2874 			| MDIO_MMS_PRT_ADDR(0x0)
2875 			| MDIO_OP(MDIO_OP_READ_TRANS);
2876 	writeq(val64, &bar0->mdio_control);
2877 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2878 	writeq(val64, &bar0->mdio_control);
2884 *  s2io_mdio_read - Function to read from the MDIO registers
2885 *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
2886 *  @addr     : address value
2887 *  @dev      : pointer to net_device structure
2889 *  This function is used to read values from the MDIO registers
2892 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
2896 	nic_t *sp = dev->priv;
2897 	XENA_dev_config_t *bar0 = (XENA_dev_config_t *)sp->bar0;
2899 	/* address transaction */
2900 	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2901 			| MDIO_MMD_DEV_ADDR(mmd_type)
2902 			| MDIO_MMS_PRT_ADDR(0x0);
2903 	writeq(val64, &bar0->mdio_control);
2904 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2905 	writeq(val64, &bar0->mdio_control);
2908 	/* Data transaction */
2910 	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2911 			| MDIO_MMD_DEV_ADDR(mmd_type)
2912 			| MDIO_MMS_PRT_ADDR(0x0)
2913 			| MDIO_OP(MDIO_OP_READ_TRANS);
2914 	writeq(val64, &bar0->mdio_control);
2915 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2916 	writeq(val64, &bar0->mdio_control);
2919 	/* Read the value from regs */
	/* The 16-bit MDIO data lives in bits 16..31 of mdio_control. */
2920 	rval64 = readq(&bar0->mdio_control);
2921 	rval64 = rval64 & 0xFFFF0000;
2922 	rval64 = rval64 >> 16;
2926 *  s2io_chk_xpak_counter - Function to check the status of the xpak counters
2927 *  @counter      : counter value to be updated
2928 *  @flag         : flag to indicate the status
2929 *  @type         : counter type
2931 *  This function is to check the status of the xpak counters value
2935 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
	/* Build a 2-bit mask selecting this counter's state field inside
	 * the packed regs_stat word (mask construction partly elided). */
2940 	for(i = 0; i <index; i++)
	/* Alarm asserted: bump the counter and extract the 2-bit state. */
2945 		*counter = *counter + 1;
2946 		val64 = *regs_stat & mask;
2947 		val64 = val64 >> (index * 0x2);
	/* Escalation messages, keyed by counter type (1=temperature,
	 * 2=bias current, 3=output power per the texts below). */
2954 				DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
2955 					"service. Excessive temperatures may "
2956 					"result in premature transceiver "
2960 				DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
2961 					"service Excessive bias currents may "
2962 					"indicate imminent laser diode "
2966 				DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
2967 					"service Excessive laser output "
2968 					"power may saturate far-end "
2972 				DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
		/* Write the updated 2-bit state back into regs_stat. */
2977 		val64 = val64 << (index * 0x2);
2978 		*regs_stat = (*regs_stat & (~mask)) | (val64);
	/* Alarm clear: reset this counter's state bits. */
2981 		*regs_stat = *regs_stat & (~mask);
2986 *  s2io_updt_xpak_counter - Function to update the xpak counters
2987 *  @dev         : pointer to net_device struct
2989 *  This function is to update the status of the xpak counters value
2992 static void s2io_updt_xpak_counter(struct net_device *dev)
3000 	nic_t *sp = dev->priv;
3001 	StatInfo_t *stat_info = sp->mac_control.stats_info;
3003 	/* Check the communication with the MDIO slave */
	/* All-ones or all-zeros reads indicate a dead/absent slave. */
3006 	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3007 	if((val64 == 0xFFFF) || (val64 == 0x0000))
3009 		DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
3010 			  "Returned %llx\n", (unsigned long long)val64);
3014 	/* Check for the expected value of 2040 at PMA address 0x0000 */
3017 		DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
3018 		DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
3019 			  (unsigned long long)val64);
3023 	/* Loading the DOM register to MDIO register */
3025 	s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
3026 	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3028 	/* Reading the Alarm flags */
	/* High alarms (bits 7/3/1) go through s2io_chk_xpak_counter so
	 * repeated alarms are rate-tracked; low alarms (bits 6/2/0) are
	 * simple counters. */
3031 	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3033 	flag = CHECKBIT(val64, 0x7);
3035 	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
3036 				&stat_info->xpak_stat.xpak_regs_stat,
3039 	if(CHECKBIT(val64, 0x6))
3040 		stat_info->xpak_stat.alarm_transceiver_temp_low++;
3042 	flag = CHECKBIT(val64, 0x3);
3044 	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
3045 				&stat_info->xpak_stat.xpak_regs_stat,
3048 	if(CHECKBIT(val64, 0x2))
3049 		stat_info->xpak_stat.alarm_laser_bias_current_low++;
3051 	flag = CHECKBIT(val64, 0x1);
3053 	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
3054 				&stat_info->xpak_stat.xpak_regs_stat,
3057 	if(CHECKBIT(val64, 0x0))
3058 		stat_info->xpak_stat.alarm_laser_output_power_low++;
3060 	/* Reading the Warning flags */
	/* Warnings use the same bit layout as alarms but are only
	 * counted, never escalated. */
3063 	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3065 	if(CHECKBIT(val64, 0x7))
3066 		stat_info->xpak_stat.warn_transceiver_temp_high++;
3068 	if(CHECKBIT(val64, 0x6))
3069 		stat_info->xpak_stat.warn_transceiver_temp_low++;
3071 	if(CHECKBIT(val64, 0x3))
3072 		stat_info->xpak_stat.warn_laser_bias_current_high++;
3074 	if(CHECKBIT(val64, 0x2))
3075 		stat_info->xpak_stat.warn_laser_bias_current_low++;
3077 	if(CHECKBIT(val64, 0x1))
3078 		stat_info->xpak_stat.warn_laser_output_power_high++;
3080 	if(CHECKBIT(val64, 0x0))
3081 		stat_info->xpak_stat.warn_laser_output_power_low++;
3085 * alarm_intr_handler - Alarm Interrupt handler
3086 * @nic: device private variable
3087 * Description: If the interrupt was neither because of Rx packet or Tx
3088 * complete, this function is called. If the interrupt was to indicate
3089 * a loss of link, the OSM link status handler is invoked for any other
3090 * alarm interrupt the block that raised the interrupt is displayed
3091 * and a H/W reset is issued.
3096 static void alarm_intr_handler(struct s2io_nic *nic)
3098 struct net_device *dev = (struct net_device *) nic->dev;
3099 XENA_dev_config_t __iomem *bar0 = nic->bar0;
3100 register u64 val64 = 0, err_reg = 0;
3103 nic->mac_control.stats_info->sw_stat.ring_full_cnt = 0;
3104 /* Handling the XPAK counters update */
3105 if(nic->mac_control.stats_info->xpak_stat.xpak_timer_count < 72000) {
3106 /* waiting for an hour */
3107 nic->mac_control.stats_info->xpak_stat.xpak_timer_count++;
3109 s2io_updt_xpak_counter(dev);
3110 /* reset the count to zero */
3111 nic->mac_control.stats_info->xpak_stat.xpak_timer_count = 0;
3114 /* Handling link status change error Intr */
3115 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
3116 err_reg = readq(&bar0->mac_rmac_err_reg);
3117 writeq(err_reg, &bar0->mac_rmac_err_reg);
3118 if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
3119 schedule_work(&nic->set_link_task);