/************************************************************************
 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
 * Copyright(c) 2002-2005 Neterion Inc.
 *
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice. This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * Credits:
 * Jeff Garzik		: For pointing out the improper error condition
 *			  check in the s2io_xmit routine and also some
 *			  issues in the Tx watch dog function. Also for
 *			  patiently answering all those innumerable
 *			  questions regarding the 2.6 porting issues.
 * Stephen Hemminger	: Providing proper 2.6 porting mechanism for some
 *			  macros available only in 2.6 Kernel.
 * Francois Romieu	: For pointing out all code parts that were
 *			  deprecated and also styling related comments.
 * Grant Grundler	: For helping me get rid of some Architecture
 *			  dependent code.
 * Christopher Hellwig	: Some more 2.6 specific issues in the driver.
 *
 * The module loadable parameters that are supported by the driver and a brief
 * explanation of all the variables:
 *
 * rx_ring_num : This can be used to program the number of receive rings used
 *     in the driver.
 * rx_ring_sz: This defines the number of receive blocks each ring can have.
 *     This is also an array of size 8.
 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
 *     values are 1, 2 and 3.
 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
 * tx_fifo_len: This is also an array of 8. Each element defines the number of
 *     Tx descriptors that can be associated with each corresponding FIFO.
 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
 *     1(MSI), 2(MSI_X). Default value is '0(INTA)'.
 * lro: Specifies whether to enable Large Receive Offload (LRO) or not.
 *     Possible values are '1' to enable and '0' to disable. Default is '0'.
 * lro_max_pkts: This parameter defines the maximum number of packets that
 *     can be aggregated as a single large packet.
 ************************************************************************/

#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/stddef.h>
#include <linux/ioctl.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/ethtool.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/tcp.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/div64.h>

/* local include */
#include "s2io.h"
#include "s2io-regs.h"

#define DRV_VERSION "2.0.14.2"
81 /* S2io Driver name & version. */
82 static char s2io_driver_name[] = "Neterion";
83 static char s2io_driver_version[] = DRV_VERSION;
85 static int rxd_size[4] = {32,48,48,64};
86 static int rxd_count[4] = {127,85,85,63};

static inline int RXD_IS_UP2DT(RxD_t *rxdp)
{
	int ret;

	ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
	       (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));

	return ret;
}

/*
 * Cards with following subsystem_id have a link state indication
 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
 * The macro below identifies these cards given the subsystem_id.
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid)	\
	(dev_type == XFRAME_I_DEVICE) ?				\
		((((subid >= 0x600B) && (subid <= 0x600D)) ||	\
		  ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
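
/*
 * Illustrative note (not in the original source): for an Xframe-I card
 * with subsystem id 0x600C the macro above evaluates to 1; any Xframe-II
 * device evaluates to 0 regardless of the subsystem id.
 */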

#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
				      ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))

static inline int rx_buffer_level(nic_t *sp, int rxb_size, int ring)
{
	mac_info_t *mac_control;

	mac_control = &sp->mac_control;
	if (rxb_size <= rxd_count[sp->rxd_mode])
		return PANIC;
	else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16)
		return LOW;
	return 0;
}
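
/*
 * Illustrative note (not in the original source): with rxd_mode 1
 * (rxd_count = 127), a ring whose count of available Rx buffers has
 * dropped to one block's worth (127) or less reports PANIC; a ring
 * more than 16 buffers short of its pkt_cnt reports LOW, prompting
 * the Rx path to replenish buffers.
 */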

/* Ethtool related variables and Macros. */
static char s2io_gstrings[][ETH_GSTRING_LEN] = {
	"Register test\t(offline)",
	"Eeprom test\t(offline)",
	"Link test\t(online)",
	"RLDRAM test\t(offline)",
	"BIST Test\t(offline)"
};

static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
	{"tmac_data_octets"},
	{"tmac_pause_ctrl_frms"},
	{"tmac_any_err_frms"},
	{"tmac_ttl_less_fb_octets"},
	{"tmac_vld_ip_octets"},
	{"rmac_data_octets"},
	{"rmac_fcs_err_frms"},
	{"rmac_vld_mcst_frms"},
	{"rmac_vld_bcst_frms"},
	{"rmac_in_rng_len_err_frms"},
	{"rmac_out_rng_len_err_frms"},
	{"rmac_pause_ctrl_frms"},
	{"rmac_unsup_ctrl_frms"},
	{"rmac_accepted_ucst_frms"},
	{"rmac_accepted_nucst_frms"},
	{"rmac_discarded_frms"},
	{"rmac_drop_events"},
	{"rmac_ttl_less_fb_octets"},
	{"rmac_usized_frms"},
	{"rmac_osized_frms"},
	{"rmac_jabber_frms"},
	{"rmac_ttl_64_frms"},
	{"rmac_ttl_65_127_frms"},
	{"rmac_ttl_128_255_frms"},
	{"rmac_ttl_256_511_frms"},
	{"rmac_ttl_512_1023_frms"},
	{"rmac_ttl_1024_1518_frms"},
	{"rmac_err_drp_udp"},
	{"rmac_xgmii_err_sym"},
	{"rmac_xgmii_data_err_cnt"},
	{"rmac_xgmii_ctrl_err_cnt"},
	{"rmac_accepted_ip"},
	{"new_rd_req_rtry_cnt"},
	{"wr_rtry_rd_ack_cnt"},
	{"new_wr_req_rtry_cnt"},
	{"rd_rtry_wr_ack_cnt"},
	{"rmac_ttl_1519_4095_frms"},
	{"rmac_ttl_4096_8191_frms"},
	{"rmac_ttl_8192_max_frms"},
	{"rmac_ttl_gt_max_frms"},
	{"rmac_osized_alt_frms"},
	{"rmac_jabber_alt_frms"},
	{"rmac_gt_max_alt_frms"},
	{"rmac_len_discard"},
	{"rmac_fcs_discard"},
	{"rmac_red_discard"},
	{"rmac_rts_discard"},
	{"rmac_ingm_full_discard"},
	{"\n DRIVER STATISTICS"},
	{"single_bit_ecc_errs"},
	{"double_bit_ecc_errs"},
	{"alarm_transceiver_temp_high"},
	{"alarm_transceiver_temp_low"},
	{"alarm_laser_bias_current_high"},
	{"alarm_laser_bias_current_low"},
	{"alarm_laser_output_power_high"},
	{"alarm_laser_output_power_low"},
	{"warn_transceiver_temp_high"},
	{"warn_transceiver_temp_low"},
	{"warn_laser_bias_current_high"},
	{"warn_laser_bias_current_low"},
	{"warn_laser_output_power_high"},
	{"warn_laser_output_power_low"},
	{"lro_aggregated_pkts"},
	{"lro_flush_both_count"},
	{"lro_out_of_sequence_pkts"},
	{"lro_flush_due_to_max_pkts"},
	{"lro_avg_aggr_pkts"},
};

#define S2IO_STAT_LEN		(sizeof(ethtool_stats_keys) / ETH_GSTRING_LEN)
#define S2IO_STAT_STRINGS_LEN	(S2IO_STAT_LEN * ETH_GSTRING_LEN)

#define S2IO_TEST_LEN		(sizeof(s2io_gstrings) / ETH_GSTRING_LEN)
#define S2IO_STRINGS_LEN	(S2IO_TEST_LEN * ETH_GSTRING_LEN)

#define S2IO_TIMER_CONF(timer, handle, arg, exp)	\
	do {						\
		init_timer(&timer);			\
		timer.function = handle;		\
		timer.data = (unsigned long)arg;	\
		mod_timer(&timer, (jiffies + exp));	\
	} while (0)
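
/*
 * Usage sketch (illustrative, not part of the original source): arm a
 * timer so that "handle" runs with "arg" after half a second, e.g.
 *
 *	S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle,
 *			(unsigned long)dev, (HZ / 2));
 *
 * where sp->alarm_timer and s2io_alarm_handle are assumed to be the
 * driver's alarm timer and its handler.
 */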

static void s2io_vlan_rx_register(struct net_device *dev,
				  struct vlan_group *grp)
{
	nic_t *nic = dev->priv;
	unsigned long flags;

	spin_lock_irqsave(&nic->tx_lock, flags);
	nic->vlgrp = grp;
	spin_unlock_irqrestore(&nic->tx_lock, flags);
}

/* Unregister the vlan */
static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
{
	nic_t *nic = dev->priv;
	unsigned long flags;

	spin_lock_irqsave(&nic->tx_lock, flags);
	if (nic->vlgrp)
		nic->vlgrp->vlan_devices[vid] = NULL;
	spin_unlock_irqrestore(&nic->tx_lock, flags);
}

/*
 * Constants to be programmed into the Xena's registers, to configure
 * the XAUI.
 */

#define	END_SIGN	0x0

static const u64 herc_act_dtx_cfg[] = {
	/* Set address */
	0x8000051536750000ULL, 0x80000515367500E0ULL,
	/* Write data */
	0x8000051536750004ULL, 0x80000515367500E4ULL,
	/* Set address */
	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
	/* Write data */
	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
	/* Set address */
	0x801205150D440000ULL, 0x801205150D4400E0ULL,
	/* Write data */
	0x801205150D440004ULL, 0x801205150D4400E4ULL,
	/* Set address */
	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	/* Done */
	END_SIGN
};

static const u64 xena_dtx_cfg[] = {
	0x8000051500000000ULL, 0x80000515000000E0ULL,
	0x80000515D9350004ULL, 0x80000515D93500E4ULL,
	0x8001051500000000ULL, 0x80010515000000E0ULL,
	0x80010515001E0004ULL, 0x80010515001E00E4ULL,
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	END_SIGN
};

/*
 * Constants for fixing the MAC address problem seen mostly on
 * Alpha machines.
 */
static const u64 fix_mac[] = {
	0x0060000000000000ULL, 0x0060600000000000ULL,
	0x0040600000000000ULL, 0x0000600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0000600000000000ULL,
	0x0040600000000000ULL, 0x0060600000000000ULL,
	END_SIGN
};

/* Module Loadable parameters. */
static unsigned int tx_fifo_num = 1;
static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
    {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
static unsigned int rx_ring_num = 1;
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
static unsigned int rts_frm_len[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = 0};
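
/*
 * Note (editorial): "[1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN"
 * above is a GCC designated range initializer; it fills elements 1
 * through MAX_TX_FIFOS - 1 with one value, so FIFO 0 keeps its own
 * default while the remaining FIFOs share another.
 */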

static unsigned int rx_ring_mode = 1;
static unsigned int use_continuous_tx_intrs = 1;
static unsigned int rmac_pause_time = 0x100;
static unsigned int mc_pause_threshold_q0q3 = 187;
static unsigned int mc_pause_threshold_q4q7 = 187;
static unsigned int shared_splits;
static unsigned int tmac_util_period = 5;
static unsigned int rmac_util_period = 5;
static unsigned int bimodal = 0;
static unsigned int l3l4hdr_size = 128;
#ifndef CONFIG_S2IO_NAPI
static unsigned int indicate_max_pkts;
#endif
/* Frequency of Rx desc syncs expressed as power of 2 */
static unsigned int rxsync_frequency = 3;
/* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */
static unsigned int intr_type = 0;
/* Large receive offload feature */
static unsigned int lro = 0;
/*
 * Max pkts to be aggregated by LRO at one time. If not specified,
 * aggregation happens until we hit the max IP pkt size (64K).
 */
static unsigned int lro_max_pkts = 0xFFFF;

/*
 * S2IO device table.
 * This table lists all the devices that this driver supports.
 */
static struct pci_device_id s2io_tbl[] __devinitdata = {
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{0,}
};

MODULE_DEVICE_TABLE(pci, s2io_tbl);

static struct pci_driver s2io_driver = {
	.name = "S2IO",
	.id_table = s2io_tbl,
	.probe = s2io_init_nic,
	.remove = __devexit_p(s2io_rem_nic),
};

/* A simplifier macro used both by init and free shared_mem Fns(). */
#define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
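
/*
 * Worked example (illustrative, not from the original source): the macro
 * is a ceiling division, e.g. TXD_MEM_PAGE_CNT(100, 8) = (100 + 7) / 8
 * = 13, so a partially filled trailing page still gets allocated.
 */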

/**
 * init_shared_mem - Allocation and Initialization of Memory
 * @nic: Device private variable.
 * Description: The function allocates all the memory areas shared
 * between the NIC and the driver. This includes Tx descriptors,
 * Rx descriptors and the statistics block.
 */

static int init_shared_mem(struct s2io_nic *nic)
{
	u32 size;
	void *tmp_v_addr, *tmp_v_addr_next;
	dma_addr_t tmp_p_addr, tmp_p_addr_next;
	RxD_block_t *pre_rxd_blk = NULL;
	int i, j, blk_cnt, rx_sz, tx_sz;
	int lst_size, lst_per_page;
	struct net_device *dev = nic->dev;
	unsigned long tmp;
	buffAdd_t *ba;
	mac_info_t *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* Allocation and initialization of TXDLs in FIFOs */
	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		size += config->tx_cfg[i].fifo_len;
	}
	if (size > MAX_AVAILABLE_TXDS) {
		DBG_PRINT(ERR_DBG, "%s: Requested TxDs too high, ",
			  dev->name);
		DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
		return FAILURE;
	}

	lst_size = (sizeof(TxD_t) * config->max_txds);
	tx_sz = lst_size * size;
	lst_per_page = PAGE_SIZE / lst_size;

	for (i = 0; i < config->tx_fifo_num; i++) {
		int fifo_len = config->tx_cfg[i].fifo_len;
		int list_holder_size = fifo_len * sizeof(list_info_hold_t);
		mac_control->fifos[i].list_info = kmalloc(list_holder_size,
							  GFP_KERNEL);
		if (!mac_control->fifos[i].list_info) {
			DBG_PRINT(ERR_DBG,
				  "Malloc failed for list_info\n");
			return -ENOMEM;
		}
		memset(mac_control->fifos[i].list_info, 0, list_holder_size);
	}
	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
						lst_per_page);
		mac_control->fifos[i].tx_curr_put_info.offset = 0;
		mac_control->fifos[i].tx_curr_put_info.fifo_len =
		    config->tx_cfg[i].fifo_len - 1;
		mac_control->fifos[i].tx_curr_get_info.offset = 0;
		mac_control->fifos[i].tx_curr_get_info.fifo_len =
		    config->tx_cfg[i].fifo_len - 1;
		mac_control->fifos[i].fifo_no = i;
		mac_control->fifos[i].nic = nic;
		mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;

		for (j = 0; j < page_num; j++) {
			int k = 0;
			dma_addr_t tmp_p;
			void *tmp_v;
			tmp_v = pci_alloc_consistent(nic->pdev,
						     PAGE_SIZE, &tmp_p);
			if (!tmp_v) {
				DBG_PRINT(ERR_DBG,
					  "pci_alloc_consistent ");
				DBG_PRINT(ERR_DBG, "failed for TxDL\n");
				return -ENOMEM;
			}
			/* If we got a zero DMA address(can happen on
			 * certain platforms like PPC), reallocate.
			 * Store virtual address of page we don't want,
			 * to be freed later.
			 */
			if (!tmp_p) {
				mac_control->zerodma_virt_addr = tmp_v;
				DBG_PRINT(INIT_DBG,
					  "%s: Zero DMA address for TxDL. ", dev->name);
				DBG_PRINT(INIT_DBG,
					  "Virtual address %p\n", tmp_v);
				tmp_v = pci_alloc_consistent(nic->pdev,
							     PAGE_SIZE, &tmp_p);
				if (!tmp_v) {
					DBG_PRINT(ERR_DBG,
						  "pci_alloc_consistent ");
					DBG_PRINT(ERR_DBG, "failed for TxDL\n");
					return -ENOMEM;
				}
			}
			while (k < lst_per_page) {
				int l = (j * lst_per_page) + k;
				if (l == config->tx_cfg[i].fifo_len)
					break;
				mac_control->fifos[i].list_info[l].list_virt_addr =
				    tmp_v + (k * lst_size);
				mac_control->fifos[i].list_info[l].list_phy_addr =
				    tmp_p + (k * lst_size);
				k++;
			}
		}
	}

	nic->ufo_in_band_v = kmalloc((sizeof(u64) * size), GFP_KERNEL);
	if (!nic->ufo_in_band_v)
		return -ENOMEM;

	/* Allocation and initialization of RXDs in Rings */
	size = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		if (config->rx_cfg[i].num_rxd %
		    (rxd_count[nic->rxd_mode] + 1)) {
			DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
			DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
				  i);
			DBG_PRINT(ERR_DBG, "RxDs per Block");
			return FAILURE;
		}
		size += config->rx_cfg[i].num_rxd;
		mac_control->rings[i].block_count =
		    config->rx_cfg[i].num_rxd /
		    (rxd_count[nic->rxd_mode] + 1);
		mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
		    mac_control->rings[i].block_count;
	}
	if (nic->rxd_mode == RXD_MODE_1)
		size = (size * (sizeof(RxD1_t)));
	else
		size = (size * (sizeof(RxD3_t)));
	rx_sz = size;

	for (i = 0; i < config->rx_ring_num; i++) {
		mac_control->rings[i].rx_curr_get_info.block_index = 0;
		mac_control->rings[i].rx_curr_get_info.offset = 0;
		mac_control->rings[i].rx_curr_get_info.ring_len =
		    config->rx_cfg[i].num_rxd - 1;
		mac_control->rings[i].rx_curr_put_info.block_index = 0;
		mac_control->rings[i].rx_curr_put_info.offset = 0;
		mac_control->rings[i].rx_curr_put_info.ring_len =
		    config->rx_cfg[i].num_rxd - 1;
		mac_control->rings[i].nic = nic;
		mac_control->rings[i].ring_no = i;

		blk_cnt = config->rx_cfg[i].num_rxd /
		    (rxd_count[nic->rxd_mode] + 1);
		/* Allocating all the Rx blocks */
		for (j = 0; j < blk_cnt; j++) {
			rx_block_info_t *rx_blocks;
			int l;

			rx_blocks = &mac_control->rings[i].rx_blocks[j];
			size = SIZE_OF_BLOCK;	/* size is always page size */
			tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
							  &tmp_p_addr);
			if (tmp_v_addr == NULL) {
				/*
				 * In case of failure, free_shared_mem()
				 * is called, which should free any
				 * memory that was alloced till the
				 * failure happened.
				 */
				rx_blocks->block_virt_addr = tmp_v_addr;
				return -ENOMEM;
			}
			memset(tmp_v_addr, 0, size);
			rx_blocks->block_virt_addr = tmp_v_addr;
			rx_blocks->block_dma_addr = tmp_p_addr;
			rx_blocks->rxds = kmalloc(sizeof(rxd_info_t) *
						  rxd_count[nic->rxd_mode],
						  GFP_KERNEL);
			if (!rx_blocks->rxds)
				return -ENOMEM;
			for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
				rx_blocks->rxds[l].virt_addr =
				    rx_blocks->block_virt_addr +
				    (rxd_size[nic->rxd_mode] * l);
				rx_blocks->rxds[l].dma_addr =
				    rx_blocks->block_dma_addr +
				    (rxd_size[nic->rxd_mode] * l);
			}
		}
		/* Interlinking all Rx Blocks */
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr =
			    mac_control->rings[i].rx_blocks[j].block_virt_addr;
			tmp_v_addr_next =
			    mac_control->rings[i].rx_blocks[(j + 1) %
					    blk_cnt].block_virt_addr;
			tmp_p_addr =
			    mac_control->rings[i].rx_blocks[j].block_dma_addr;
			tmp_p_addr_next =
			    mac_control->rings[i].rx_blocks[(j + 1) %
					    blk_cnt].block_dma_addr;

			pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
			pre_rxd_blk->reserved_2_pNext_RxD_block =
			    (unsigned long) tmp_v_addr_next;
			pre_rxd_blk->pNext_RxD_Blk_physical =
			    (u64) tmp_p_addr_next;
		}
	}
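
	/*
	 * Illustrative note (not in the original source): the modulo
	 * arithmetic in the interlinking loop above makes the block list
	 * circular. With blk_cnt = 3 the links are 0 -> 1, 1 -> 2 and
	 * 2 -> 0, so the PRC can keep walking RxD blocks without ever
	 * reaching a terminating pointer.
	 */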

	if (nic->rxd_mode >= RXD_MODE_3A) {
		/*
		 * Allocation of storage for buffer addresses in 2BUFF mode
		 * and the buffers as well.
		 */
		for (i = 0; i < config->rx_ring_num; i++) {
			blk_cnt = config->rx_cfg[i].num_rxd /
			    (rxd_count[nic->rxd_mode] + 1);
			mac_control->rings[i].ba =
			    kmalloc((sizeof(buffAdd_t *) * blk_cnt),
				    GFP_KERNEL);
			if (!mac_control->rings[i].ba)
				return -ENOMEM;
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;
				mac_control->rings[i].ba[j] =
				    kmalloc((sizeof(buffAdd_t) *
					     (rxd_count[nic->rxd_mode] + 1)),
					    GFP_KERNEL);
				if (!mac_control->rings[i].ba[j])
					return -ENOMEM;
				while (k != rxd_count[nic->rxd_mode]) {
					ba = &mac_control->rings[i].ba[j][k];

					ba->ba_0_org = (void *) kmalloc
					    (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
					if (!ba->ba_0_org)
						return -ENOMEM;
					tmp = (unsigned long)ba->ba_0_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long) ALIGN_SIZE);
					ba->ba_0 = (void *) tmp;

					ba->ba_1_org = (void *) kmalloc
					    (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
					if (!ba->ba_1_org)
						return -ENOMEM;
					tmp = (unsigned long) ba->ba_1_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long) ALIGN_SIZE);
					ba->ba_1 = (void *) tmp;
					k++;
				}
			}
		}
	}
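
	/*
	 * Worked example (illustrative, not from the original source):
	 * the pair "tmp += ALIGN_SIZE; tmp &= ~ALIGN_SIZE;" above rounds
	 * an address up to the next (ALIGN_SIZE + 1) boundary. Assuming
	 * ALIGN_SIZE is 127, ba_0_org = 0x1008 gives tmp = 0x1087 and,
	 * after masking, ba_0 = 0x1080, a 128-byte aligned address inside
	 * the over-sized allocation.
	 */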

	/* Allocation and initialization of Statistics block */
	size = sizeof(StatInfo_t);
	mac_control->stats_mem = pci_alloc_consistent
	    (nic->pdev, size, &mac_control->stats_mem_phy);

	if (!mac_control->stats_mem) {
		/*
		 * In case of failure, free_shared_mem() is called, which
		 * should free any memory that was alloced till the
		 * failure happened.
		 */
		return -ENOMEM;
	}
	mac_control->stats_mem_sz = size;

	tmp_v_addr = mac_control->stats_mem;
	mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
	memset(tmp_v_addr, 0, size);
	DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
		  (unsigned long long) tmp_p_addr);

	return SUCCESS;
}

/**
 * free_shared_mem - Free the allocated Memory
 * @nic: Device private variable.
 * Description: This function is to free all memory locations allocated by
 * the init_shared_mem() function and return it to the kernel.
 */

static void free_shared_mem(struct s2io_nic *nic)
{
	int i, j, blk_cnt, size;
	void *tmp_v_addr;
	dma_addr_t tmp_p_addr;
	mac_info_t *mac_control;
	struct config_param *config;
	int lst_size, lst_per_page;
	struct net_device *dev = nic->dev;

	if (!nic)
		return;

	mac_control = &nic->mac_control;
	config = &nic->config;

	lst_size = (sizeof(TxD_t) * config->max_txds);
	lst_per_page = PAGE_SIZE / lst_size;

	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
						lst_per_page);
		for (j = 0; j < page_num; j++) {
			int mem_blks = (j * lst_per_page);
			if (!mac_control->fifos[i].list_info)
				return;
			if (!mac_control->fifos[i].list_info[mem_blks].
			    list_virt_addr)
				break;
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    mac_control->fifos[i].
					    list_info[mem_blks].list_virt_addr,
					    mac_control->fifos[i].
					    list_info[mem_blks].list_phy_addr);
		}
		/* If we got a zero DMA address during allocation,
		 * free the page now.
		 */
		if (mac_control->zerodma_virt_addr) {
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    mac_control->zerodma_virt_addr,
					    (dma_addr_t)0);
			DBG_PRINT(INIT_DBG,
				  "%s: Freeing TxDL with zero DMA addr. ",
				  dev->name);
			DBG_PRINT(INIT_DBG, "Virtual address %p\n",
				  mac_control->zerodma_virt_addr);
		}
		kfree(mac_control->fifos[i].list_info);
	}

	size = SIZE_OF_BLOCK;
	for (i = 0; i < config->rx_ring_num; i++) {
		blk_cnt = mac_control->rings[i].block_count;
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr = mac_control->rings[i].rx_blocks[j].
			    block_virt_addr;
			tmp_p_addr = mac_control->rings[i].rx_blocks[j].
			    block_dma_addr;
			if (tmp_v_addr == NULL)
				break;
			pci_free_consistent(nic->pdev, size,
					    tmp_v_addr, tmp_p_addr);
			kfree(mac_control->rings[i].rx_blocks[j].rxds);
		}
	}

	if (nic->rxd_mode >= RXD_MODE_3A) {
		/* Freeing buffer storage addresses in 2BUFF mode. */
		for (i = 0; i < config->rx_ring_num; i++) {
			blk_cnt = config->rx_cfg[i].num_rxd /
			    (rxd_count[nic->rxd_mode] + 1);
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;
				if (!mac_control->rings[i].ba[j])
					continue;
				while (k != rxd_count[nic->rxd_mode]) {
					buffAdd_t *ba =
					    &mac_control->rings[i].ba[j][k];
					kfree(ba->ba_0_org);
					kfree(ba->ba_1_org);
					k++;
				}
				kfree(mac_control->rings[i].ba[j]);
			}
			kfree(mac_control->rings[i].ba);
		}
	}

	if (mac_control->stats_mem) {
		pci_free_consistent(nic->pdev,
				    mac_control->stats_mem_sz,
				    mac_control->stats_mem,
				    mac_control->stats_mem_phy);
	}
	if (nic->ufo_in_band_v)
		kfree(nic->ufo_in_band_v);
}

/**
 * s2io_verify_pci_mode - Verify the PCI/PCI-X mode the adapter is operating in.
 */

static int s2io_verify_pci_mode(nic_t *nic)
{
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int mode;

	val64 = readq(&bar0->pci_mode);
	mode = (u8) GET_PCI_MODE(val64);

	if (val64 & PCI_MODE_UNKNOWN_MODE)
		return -1;	/* Unknown PCI mode */
	return mode;
}

#define NEC_VENID	0x1033
#define NEC_DEVID	0x0125
static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
{
	struct pci_dev *tdev = NULL;
	while ((tdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
		if ((tdev->vendor == NEC_VENID) &&
		    (tdev->device == NEC_DEVID)) {
			if (tdev->bus == s2io_pdev->bus->parent)
				return 1;
		}
	}
	return 0;
}

static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};

/**
 * s2io_print_pci_mode - Print the PCI/PCI-X bus mode the adapter is in.
 */
static int s2io_print_pci_mode(nic_t *nic)
{
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int mode;
	struct config_param *config = &nic->config;

	val64 = readq(&bar0->pci_mode);
	mode = (u8) GET_PCI_MODE(val64);

	if (val64 & PCI_MODE_UNKNOWN_MODE)
		return -1;	/* Unknown PCI mode */

	config->bus_speed = bus_speed[mode];

	if (s2io_on_nec_bridge(nic->pdev)) {
		DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
			  nic->dev->name);
		return mode;
	}

	if (val64 & PCI_MODE_32_BITS) {
		DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
	} else {
		DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
	}

	switch (mode) {
	case PCI_MODE_PCI_33:
		DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
		break;
	case PCI_MODE_PCI_66:
		DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
		break;
	case PCI_MODE_PCIX_M1_66:
		DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
		break;
	case PCI_MODE_PCIX_M1_100:
		DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
		break;
	case PCI_MODE_PCIX_M1_133:
		DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
		break;
	case PCI_MODE_PCIX_M2_66:
		DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
		break;
	case PCI_MODE_PCIX_M2_100:
		DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
		break;
	case PCI_MODE_PCIX_M2_133:
		DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
		break;
	default:
		return -1;	/* Unsupported bus speed */
	}

	return mode;
}

/**
 * init_nic - Initialization of hardware
 * @nic: device private variable
 * Description: The function sequentially configures every block
 * of the H/W from their reset values.
 * Return Value: SUCCESS on success and
 * '-1' on failure (endian settings incorrect).
 */

static int init_nic(struct s2io_nic *nic)
{
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	void __iomem *add;
	u32 time;
	int i, j;
	mac_info_t *mac_control;
	struct config_param *config;
	int dtx_cnt = 0;
	unsigned long long mem_share;
	int mem_size;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* to set the swapper control on the card */
	if (s2io_set_swapper(nic)) {
		DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
		return -1;
	}

	/*
	 * Herc requires EOI to be removed from reset before XGXS, so..
	 */
	if (nic->device_type & XFRAME_II_DEVICE) {
		val64 = 0xA500000000ULL;
		writeq(val64, &bar0->sw_reset);
		msleep(500);
		val64 = readq(&bar0->sw_reset);
	}

	/* Remove XGXS from reset state */
	val64 = 0;
	writeq(val64, &bar0->sw_reset);
	msleep(500);
	val64 = readq(&bar0->sw_reset);

	/* Enable Receiving broadcasts */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 |= MAC_RMAC_BCAST_ENABLE;
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) val64, add);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64 >> 32), (add + 4));

	/* Read registers in all blocks */
	val64 = readq(&bar0->mac_int_mask);
	val64 = readq(&bar0->mc_int_mask);
	val64 = readq(&bar0->xgxs_int_mask);

	/* Set MTU */
	val64 = dev->mtu;
	writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);

	if (nic->device_type & XFRAME_II_DEVICE) {
		while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
			SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
					  &bar0->dtx_control, UF);
			msleep(1);	/* Necessary!! */
			dtx_cnt++;
		}
	} else {
		while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
			SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
					  &bar0->dtx_control, UF);
			val64 = readq(&bar0->dtx_control);
			dtx_cnt++;
		}
	}

	/* Tx DMA Initialization */
	val64 = 0;
	writeq(val64, &bar0->tx_fifo_partition_0);
	writeq(val64, &bar0->tx_fifo_partition_1);
	writeq(val64, &bar0->tx_fifo_partition_2);
	writeq(val64, &bar0->tx_fifo_partition_3);

	for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
		val64 |=
		    vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
			 13) | vBIT(config->tx_cfg[i].fifo_priority,
				    ((i * 32) + 5), 3);

		if (i == (config->tx_fifo_num - 1)) {
			if (i % 2 == 0)
				i++;
		}

		switch (i) {
		case 1:
			writeq(val64, &bar0->tx_fifo_partition_0);
			val64 = 0;
			break;
		case 3:
			writeq(val64, &bar0->tx_fifo_partition_1);
			val64 = 0;
			break;
		case 5:
			writeq(val64, &bar0->tx_fifo_partition_2);
			val64 = 0;
			break;
		case 7:
			writeq(val64, &bar0->tx_fifo_partition_3);
			val64 = 0;
			break;
		}
	}

	/*
	 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
	 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
	 */
	if ((nic->device_type == XFRAME_I_DEVICE) &&
	    (get_xena_rev_id(nic->pdev) < 4))
		writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);

	val64 = readq(&bar0->tx_fifo_partition_0);
	DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
		  &bar0->tx_fifo_partition_0, (unsigned long long) val64);

	/*
	 * Initialization of Tx_PA_CONFIG register to ignore packet
	 * integrity checking.
	 */
	val64 = readq(&bar0->tx_pa_cfg);
	val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
	    TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
	writeq(val64, &bar0->tx_pa_cfg);

	/* Rx DMA initialization. */
	val64 = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		val64 |=
		    vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
			 3);
	}
	writeq(val64, &bar0->rx_queue_priority);

	/*
	 * Allocating equal share of memory to all the
	 * configured Rings.
	 */
	val64 = 0;
	if (nic->device_type & XFRAME_II_DEVICE)
		mem_size = 32;
	else
		mem_size = 64;

	for (i = 0; i < config->rx_ring_num; i++) {
		switch (i) {
		case 0:
			mem_share = (mem_size / config->rx_ring_num +
				     mem_size % config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
			continue;
		case 1:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
			continue;
		case 2:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
			continue;
		case 3:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
			continue;
		case 4:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
			continue;
		case 5:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
			continue;
		case 6:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
			continue;
		case 7:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
			continue;
		}
	}
	writeq(val64, &bar0->rx_queue_cfg);

	/*
	 * Filling Tx round robin registers
	 * as per the number of FIFOs.
	 */
	switch (config->tx_fifo_num) {
	case 1:
		val64 = 0x0000000000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 2:
		val64 = 0x0000010000010000ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0100000100000100ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0001000001000001ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0000010000010000ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0100000000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 3:
		val64 = 0x0001000102000001ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0001020000010001ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0200000100010200ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001000102000001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001020000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 4:
		val64 = 0x0001020300010200ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0100000102030001ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0200010000010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001020001000001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0203000100000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 5:
		val64 = 0x0001000203000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0001020001030004ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0001000203000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001020001030004ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001000000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 6:
		val64 = 0x0001020304000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0304050001020001ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0203000100000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0304000102030405ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001000200000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 7:
		val64 = 0x0001020001020300ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0102030400010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0405060001020001ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0304050000010200ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0102030000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 8:
		val64 = 0x0001020300040105ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0200030106000204ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0103000502010007ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0304010002060500ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0103020400000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	}

	/* Enable Tx FIFO partition 0. */
	val64 = readq(&bar0->tx_fifo_partition_0);
	val64 |= (TX_FIFO_PARTITION_EN);
	writeq(val64, &bar0->tx_fifo_partition_0);

	/*
	 * Filling the Rx round robin registers as per the
	 * number of Rings and steering based on QoS.
	 */
	switch (config->rx_ring_num) {
	case 1:
		val64 = 0x8080808080808080ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 2:
		val64 = 0x0000010000010000ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0100000100000100ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0001000001000001ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0000010000010000ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0100000000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080808040404040ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 3:
		val64 = 0x0001000102000001ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0001020000010001ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0200000100010200ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001000102000001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001020000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080804040402020ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 4:
		val64 = 0x0001020300010200ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0100000102030001ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0200010000010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001020001000001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0203000100000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020201010ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 5:
		val64 = 0x0001000203000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0001020001030004ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0001000203000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001020001030004ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001000000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020201008ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 6:
		val64 = 0x0001020304000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0304050001020001ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0203000100000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0304000102030405ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001000200000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020100804ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 7:
		val64 = 0x0001020001020300ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0102030400010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0405060001020001ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0304050000010200ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0102030000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080402010080402ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 8:
		val64 = 0x0001020300040105ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0200030106000204ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0103000502010007ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0304010002060500ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0103020400000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8040201008040201ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	}

	/* UDP Fix */
	val64 = 0;
	for (i = 0; i < 8; i++)
		writeq(val64, &bar0->rts_frm_len_n[i]);

	/* Set the default rts frame length for the rings configured */
	val64 = MAC_RTS_FRM_LEN_SET(dev->mtu + 22);
	for (i = 0; i < config->rx_ring_num; i++)
		writeq(val64, &bar0->rts_frm_len_n[i]);

	/*
	 * Set the frame length for the configured rings
	 * desired by the user.
	 */
	for (i = 0; i < config->rx_ring_num; i++) {
		/*
		 * If rts_frm_len[i] == 0 then it is assumed that the user
		 * has not specified any frame length steering. If the user
		 * provides a frame length then program the rts_frm_len
		 * register for that value; otherwise leave it as is.
		 */
		if (rts_frm_len[i] != 0) {
			writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
			       &bar0->rts_frm_len_n[i]);
		}
	}

	/* Program statistics memory */
	writeq(mac_control->stats_mem_phy, &bar0->stat_addr);

	if (nic->device_type == XFRAME_II_DEVICE) {
		val64 = STAT_BC(0x320);
		writeq(val64, &bar0->stat_byte_cnt);
	}

	/*
	 * Initializing the sampling rate for the device to calculate the
	 * bandwidth utilization.
	 */
	val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
	    MAC_RX_LINK_UTIL_VAL(rmac_util_period);
	writeq(val64, &bar0->mac_link_util);

	/*
	 * Initializing the Transmit and Receive Traffic Interrupt
	 * Scheme.
	 */
	/*
	 * TTI Initialization. Default Tx timer gets us about
	 * 250 interrupts per sec. Continuous interrupts are enabled
	 * by default.
	 */
	if (nic->device_type == XFRAME_II_DEVICE) {
		int count = (nic->config.bus_speed * 125) / 2;
		val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
	} else {
		val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
	}
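
	/*
	 * Worked example (illustrative, not from the original source): on a
	 * 266 MHz bus the Xframe-II branch above programs a timer count of
	 * 266 * 125 / 2 = 16625, so the moderation interval scales with bus
	 * speed instead of using the fixed Xframe-I value 0x2078.
	 */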

	val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
	    TTI_DATA1_MEM_TX_URNG_B(0x10) |
	    TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
	if (use_continuous_tx_intrs)
		val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
	writeq(val64, &bar0->tti_data1_mem);

	val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
	    TTI_DATA2_MEM_TX_UFC_B(0x20) |
	    TTI_DATA2_MEM_TX_UFC_C(0x70) | TTI_DATA2_MEM_TX_UFC_D(0x80);
	writeq(val64, &bar0->tti_data2_mem);

	val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
	writeq(val64, &bar0->tti_command_mem);

	/*
	 * Once the operation completes, the Strobe bit of the command
	 * register will be reset. We poll for this particular condition.
	 * We wait for a maximum of 500ms for the operation to complete,
	 * if it's not complete by then we return error.
	 */
	time = 0;
	while (TRUE) {
		val64 = readq(&bar0->tti_command_mem);
		if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
			break;
		}
		if (time > 10) {
			DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
				  dev->name);
			return -1;
		}
		msleep(50);
		time++;
	}

	if (nic->config.bimodal) {
		int k = 0;
		for (k = 0; k < config->rx_ring_num; k++) {
			val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
			val64 |= TTI_CMD_MEM_OFFSET(0x38 + k);
			writeq(val64, &bar0->tti_command_mem);

			/*
			 * Once the operation completes, the Strobe bit of the
			 * command register will be reset. We poll for this
			 * particular condition. We wait for a maximum of 500ms
			 * for the operation to complete, if it's not complete
			 * by then we return error.
			 */
			time = 0;
			while (TRUE) {
				val64 = readq(&bar0->tti_command_mem);
				if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
					break;
				}
				if (time > 10) {
					DBG_PRINT(ERR_DBG,
						  "%s: TTI init Failed\n",
						  dev->name);
					return -1;
				}
				time++;
				msleep(50);
			}
		}
	} else {
		/* RTI Initialization */
		if (nic->device_type == XFRAME_II_DEVICE) {
			/*
			 * Programmed to generate Approx 500 Intrs per
			 * second.
			 */
			int count = (nic->config.bus_speed * 125) / 4;
			val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
		} else
			val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);

		val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
		    RTI_DATA1_MEM_RX_URNG_B(0x10) |
		    RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;

		writeq(val64, &bar0->rti_data1_mem);

		val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
		    RTI_DATA2_MEM_RX_UFC_B(0x2);
		if (nic->intr_type == MSI_X)
			val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) |
				  RTI_DATA2_MEM_RX_UFC_D(0x40));
		else
			val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) |
				  RTI_DATA2_MEM_RX_UFC_D(0x80));
		writeq(val64, &bar0->rti_data2_mem);

		for (i = 0; i < config->rx_ring_num; i++) {
			val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
			    | RTI_CMD_MEM_OFFSET(i);
			writeq(val64, &bar0->rti_command_mem);

			/*
			 * Once the operation completes, the Strobe bit of the
			 * command register will be reset. We poll for this
			 * particular condition. We wait for a maximum of 500ms
			 * for the operation to complete, if it's not complete
			 * by then we return error.
			 */
			time = 0;
			while (TRUE) {
				val64 = readq(&bar0->rti_command_mem);
				if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
					break;
				}
				if (time > 10) {
					DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
						  dev->name);
					return -1;
				}
				time++;
				msleep(50);
			}
		}
	}

	/*
	 * Initializing proper values as Pause threshold into all
	 * the 8 Queues on Rx side.
	 */
	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);

	/* Disable RMAC PAD STRIPPING */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64), add);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64 >> 32), (add + 4));
	val64 = readq(&bar0->mac_cfg);

	/* Enable FCS stripping by adapter */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 |= MAC_CFG_RMAC_STRIP_FCS;
	if (nic->device_type == XFRAME_II_DEVICE)
		writeq(val64, &bar0->mac_cfg);
	else {
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64), add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));
	}

	/*
	 * Set the time value to be inserted in the pause frame
	 * generated by xena.
	 */
	val64 = readq(&bar0->rmac_pause_cfg);
	val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
	val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
	writeq(val64, &bar0->rmac_pause_cfg);

	/*
	 * Set the Threshold Limit for Generating the pause frame.
	 * If the amount of data in any Queue exceeds the ratio of
	 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256, a
	 * pause frame is generated.
	 */
	val64 = 0;
	for (i = 0; i < 4; i++) {
		val64 |=
		    (((u64) 0xFF00 | nic->mac_control.
		      mc_pause_threshold_q0q3)
		     << (i * 2 * 8));
	}
	writeq(val64, &bar0->mc_pause_thresh_q0q3);

	val64 = 0;
	for (i = 0; i < 4; i++) {
		val64 |=
		    (((u64) 0xFF00 | nic->mac_control.
		      mc_pause_threshold_q4q7)
		     << (i * 2 * 8));
	}
	writeq(val64, &bar0->mc_pause_thresh_q4q7);
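
	/*
	 * Worked example (illustrative, not from the original source): with
	 * the default mc_pause_threshold_q0q3 of 187, every queue's threshold
	 * byte is programmed to 187, i.e. a pause frame is generated once a
	 * queue fills beyond roughly 187/256 = 73% of its share.
	 */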

	/*
	 * TxDMA will stop Read request if the number of read split has
	 * exceeded the limit pointed by shared_splits.
	 */
	val64 = readq(&bar0->pic_control);
	val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
	writeq(val64, &bar0->pic_control);

	if (nic->config.bus_speed == 266) {
		writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
		writeq(0x0, &bar0->read_retry_delay);
		writeq(0x0, &bar0->write_retry_delay);
	}

	/*
	 * Programming the Herc to split every write transaction
	 * that does not start on an ADB to reduce disconnects.
	 */
	if (nic->device_type == XFRAME_II_DEVICE) {
		val64 = EXT_REQ_EN | MISC_LINK_STABILITY_PRD(3);
		writeq(val64, &bar0->misc_control);
		val64 = readq(&bar0->pic_control2);
		val64 &= ~(BIT(13) | BIT(14) | BIT(15));
		writeq(val64, &bar0->pic_control2);
	}
	if (strstr(nic->product_name, "CX4")) {
		val64 = TMAC_AVG_IPG(0x17);
		writeq(val64, &bar0->tmac_avg_ipg);
	}

	return SUCCESS;
}

#define LINK_UP_DOWN_INTERRUPT		1
#define MAC_RMAC_ERR_TIMER		2

static int s2io_link_fault_indication(nic_t *nic)
{
	if (nic->intr_type != INTA)
		return MAC_RMAC_ERR_TIMER;
	if (nic->device_type == XFRAME_II_DEVICE)
		return LINK_UP_DOWN_INTERRUPT;
	else
		return MAC_RMAC_ERR_TIMER;
}

/**
 * en_dis_able_nic_intrs - Enable or Disable the interrupts
 * @nic: device private variable,
 * @mask: A mask indicating which Intr block must be modified and,
 * @flag: A flag indicating whether to enable or disable the Intrs.
 * Description: This function will either disable or enable the interrupts
 * depending on the flag argument. The mask argument can be used to
 * enable/disable any Intr block.
 * Return Value: NONE.
 */

static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
{
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	register u64 val64 = 0, temp64 = 0;

	/* Top level interrupt classification */
	/* PIC Interrupts */
	if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
		/* Enable PIC Intrs in the general intr mask register */
		val64 = TXPIC_INT_M | PIC_RX_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * If Hercules adapter enable GPIO, otherwise
			 * disable all PCIX, Flash, MDIO, IIC and GPIO
			 * interrupts for now.
			 */
			if (s2io_link_fault_indication(nic) ==
			    LINK_UP_DOWN_INTERRUPT) {
				temp64 = readq(&bar0->pic_int_mask);
				temp64 &= ~((u64) PIC_INT_GPIO);
				writeq(temp64, &bar0->pic_int_mask);
				temp64 = readq(&bar0->gpio_int_mask);
				temp64 &= ~((u64) GPIO_INT_MASK_LINK_UP);
				writeq(temp64, &bar0->gpio_int_mask);
			} else {
				writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
			}
			/*
			 * No MSI Support is available presently, so TTI and
			 * RTI interrupts are also disabled.
			 */
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable PIC Intrs in the general
			 * intr mask register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/* DMA Interrupts */
	/* Enabling/Disabling Tx DMA interrupts */
	if (mask & TX_DMA_INTR) {
		/* Enable TxDMA Intrs in the general intr mask register */
		val64 = TXDMA_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * Keep all interrupts other than PFC interrupt
			 * and PCC interrupt disabled in DMA level.
			 */
			val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
						      TXDMA_PCC_INT_M);
			writeq(val64, &bar0->txdma_int_mask);
			/*
			 * Enable only the MISC error 1 interrupt in PFC block.
			 */
			val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
			writeq(val64, &bar0->pfc_err_mask);
			/*
			 * Enable only the FB_ECC error interrupt in PCC block.
			 */
			val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
			writeq(val64, &bar0->pcc_err_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable TxDMA Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
			writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/* Enabling/Disabling Rx DMA interrupts */
	if (mask & RX_DMA_INTR) {
		/* Enable RxDMA Intrs in the general intr mask register */
		val64 = RXDMA_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * All RxDMA block interrupts are disabled for now.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable RxDMA Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/* MAC Interrupts */
	/* Enabling/Disabling MAC interrupts */
	if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
		val64 = TXMAC_INT_M | RXMAC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * All MAC block error interrupts are disabled for now.
			 */
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable MAC Intrs in the general intr mask register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
			writeq(DISABLE_ALL_INTRS,
			       &bar0->mac_rmac_err_mask);

			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/* XGXS Interrupts */
	if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
		val64 = TXXGXS_INT_M | RXXGXS_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * All XGXS block error interrupts are disabled for now.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable XGXS Intrs in the general intr mask register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/* Memory Controller(MC) interrupts */
	if (mask & MC_INTR) {
		val64 = MC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * Enable all MC Intrs.
			 */
			writeq(0x0, &bar0->mc_int_mask);
			writeq(0x0, &bar0->mc_err_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable MC Intrs in the general intr mask register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/* Tx traffic interrupts */
	if (mask & TX_TRAFFIC_INTR) {
		val64 = TXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * Enable all the Tx side interrupts;
			 * writing 0 Enables all 64 TX interrupt levels.
			 */
			writeq(0x0, &bar0->tx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Tx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/* Rx traffic interrupts */
	if (mask & RX_TRAFFIC_INTR) {
		val64 = RXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/* writing 0 Enables all 8 RX interrupt levels */
			writeq(0x0, &bar0->rx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Rx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}
}
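
/*
 * Usage sketch (illustrative, not part of the original source): callers
 * combine block masks and switch them in one call, much as start_nic()
 * does:
 *
 *	u16 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR |
 *			    TX_PIC_INTR | RX_PIC_INTR;
 *	en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
 */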

static int check_prc_pcc_state(u64 val64, int flag, int rev_id, int herc)
{
	int ret = 0;

	if (flag == FALSE) {
		if ((!herc && (rev_id >= 4)) || herc) {
			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
			    ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
			     ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
				ret = 1;
			}
		} else {
			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
			    ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
			     ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
				ret = 1;
			}
		}
	} else {
		if ((!herc && (rev_id >= 4)) || herc) {
			if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
			     ADAPTER_STATUS_RMAC_PCC_IDLE) &&
			    (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
			     ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
			      ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
				ret = 1;
			}
		} else {
			if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
			     ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
			    (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
			     ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
			      ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
				ret = 1;
			}
		}
	}

	return ret;
}

/**
 * verify_xena_quiescence - Checks whether the H/W is ready
 * @val64: Value read from adapter status register.
 * @flag: Indicates if the adapter enable bit was ever written once
 * before.
 * Description: Returns whether the H/W is ready to go or not. Depending
 * on whether the adapter enable bit was written or not, the comparison
 * differs and the calling function passes the input argument flag to
 * indicate this.
 * Return: 1 if Xena is quiescent
 *         0 if Xena is not quiescent
 */

static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag)
{
	int ret = 0, herc;
	u64 tmp64 = ~((u64) val64);
	int rev_id = get_xena_rev_id(sp->pdev);

	herc = (sp->device_type == XFRAME_II_DEVICE);
	if (!(tmp64 &
	      (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
	       ADAPTER_STATUS_PFC_READY | ADAPTER_STATUS_TMAC_BUF_EMPTY |
	       ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
	       ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
	       ADAPTER_STATUS_P_PLL_LOCK))) {
		ret = check_prc_pcc_state(val64, flag, rev_id, herc);
	}

	return ret;
}

/**
 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
 * @sp: Pointer to device specific structure
 * Description:
 * New procedure to clear mac address reading problems on Alpha platforms
 */

static void fix_mac_address(nic_t *sp)
{
	XENA_dev_config_t __iomem *bar0 = sp->bar0;
	u64 val64;
	int i = 0;

	while (fix_mac[i] != END_SIGN) {
		writeq(fix_mac[i++], &bar0->gpio_control);
		udelay(10);
		val64 = readq(&bar0->gpio_control);
	}
}

/**
 * start_nic - Turns the device on
 * @nic : device private variable.
 * Description:
 * This function actually turns the device on. Before this function is
 * called, all Registers are configured from their reset states
 * and shared memory is allocated but the NIC is still quiescent. On
 * calling this function, the device interrupts are cleared and the NIC is
 * literally switched on by writing into the adapter control register.
 * Return Value:
 * SUCCESS on success and -1 on failure.
 */

static int start_nic(struct s2io_nic *nic)
{
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	u16 subid, i;
	u16 interruptible;
	mac_info_t *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* PRC Initialization and configuration */
	for (i = 0; i < config->rx_ring_num; i++) {
		writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
		       &bar0->prc_rxd0_n[i]);

		val64 = readq(&bar0->prc_ctrl_n[i]);
		if (nic->config.bimodal)
			val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
		if (nic->rxd_mode == RXD_MODE_1)
			val64 |= PRC_CTRL_RC_ENABLED;
		else
			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
		if (nic->device_type == XFRAME_II_DEVICE)
			val64 |= PRC_CTRL_GROUP_READS;
		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
		writeq(val64, &bar0->prc_ctrl_n[i]);
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
		writeq(val64, &bar0->rx_pa_cfg);
	}

	/*
	 * Enabling MC-RLDRAM. After enabling the device, we timeout
	 * for around 100ms, which is approximately the time required
	 * for the device to be ready for operation.
	 */
	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
	val64 = readq(&bar0->mc_rldram_mrs);

	msleep(100);	/* Delay by around 100 ms. */

	/* Enabling ECC Protection. */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/*
	 * Clearing any possible Link state change interrupts that
	 * could have popped up just before Enabling the card.
	 */
	val64 = readq(&bar0->mac_rmac_err_reg);
	if (val64)
		writeq(val64, &bar0->mac_rmac_err_reg);

	/*
	 * Verify if the device is ready to be enabled, if so enable
	 * it.
	 */
	val64 = readq(&bar0->adapter_status);
	if (!verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
		DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
		DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
			  (unsigned long long) val64);
		return FAILURE;
	}

	/* Enable select interrupts */
	if (nic->intr_type != INTA)
		en_dis_able_nic_intrs(nic, ENA_ALL_INTRS, DISABLE_INTRS);
	else {
		interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
		interruptible |= TX_PIC_INTR | RX_PIC_INTR;
		interruptible |= TX_MAC_INTR | RX_MAC_INTR;
		en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
	}

	/*
	 * With some switches, link might be already up at this point.
	 * Because of this weird behavior, when we enable laser,
	 * we may not get link. We need to handle this. We cannot
	 * figure out which switch is misbehaving. So we are forced to
	 * make a global change.
	 */

	/* Enabling Laser. */
	val64 = readq(&bar0->adapter_control);
	val64 |= ADAPTER_EOI_TX_ON;
	writeq(val64, &bar0->adapter_control);

	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Don't see link state interrupts initially on some switches,
		 * so directly scheduling the link state task here.
		 */
		schedule_work(&nic->set_link_task);
	}
	/* SXE-002: Initialize link and activity LED */
	subid = nic->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (nic->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	return SUCCESS;
}
/**
 *  s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
 */
static struct sk_buff *s2io_txdl_getskb(fifo_info_t *fifo_data, TxD_t *txdlp, int get_off)
{
	nic_t *nic = fifo_data->nic;
	struct sk_buff *skb;
	TxD_t *txds;
	u16 j, frg_cnt;

	txds = txdlp;
	if (txds->Host_Control == (u64)(long)nic->ufo_in_band_v) {
		pci_unmap_single(nic->pdev, (dma_addr_t)
				 txds->Buffer_Pointer, sizeof(u64),
				 PCI_DMA_TODEVICE);
		txds++;
	}

	skb = (struct sk_buff *) ((unsigned long)
			txds->Host_Control);
	if (!skb) {
		memset(txdlp, 0, (sizeof(TxD_t) * fifo_data->max_txds));
		return NULL;
	}
	pci_unmap_single(nic->pdev, (dma_addr_t)
			 txds->Buffer_Pointer,
			 skb->len - skb->data_len,
			 PCI_DMA_TODEVICE);
	frg_cnt = skb_shinfo(skb)->nr_frags;
	if (frg_cnt) {
		txds++;
		for (j = 0; j < frg_cnt; j++, txds++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			if (!txds->Buffer_Pointer)
				break;
			pci_unmap_page(nic->pdev, (dma_addr_t)
				       txds->Buffer_Pointer,
				       frag->size, PCI_DMA_TODEVICE);
		}
	}
	txdlp->Host_Control = 0;
	return skb;
}
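
/*
 * Illustrative usage sketch (not part of the original sources): callers such
 * as tx_intr_handler() and free_tx_buffers() below reclaim completed skbs
 * like this:
 *
 *	skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
 *	if (skb)
 *		dev_kfree_skb_irq(skb);
 *
 * Any skb returned here has already been unmapped from PCI space, so it is
 * safe to free directly.
 */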
/**
 *  free_tx_buffers - Free all queued Tx buffers
 *  @nic : device private variable.
 *  Description:
 *  Free all queued Tx buffers.
 *  Return Value: void
 */

static void free_tx_buffers(struct s2io_nic *nic)
{
	struct net_device *dev = nic->dev;
	struct sk_buff *skb;
	TxD_t *txdp;
	int i, j;
	mac_info_t *mac_control;
	struct config_param *config;
	int cnt = 0;

	mac_control = &nic->mac_control;
	config = &nic->config;

	for (i = 0; i < config->tx_fifo_num; i++) {
		for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
			txdp = (TxD_t *) mac_control->fifos[i].list_info[j].
			    list_virt_addr;
			skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
			if (skb) {
				dev_kfree_skb(skb);
				cnt++;
			}
		}
		DBG_PRINT(INTR_DBG,
			  "%s: forcibly freeing %d skbs on FIFO%d\n",
			  dev->name, cnt, i);
		mac_control->fifos[i].tx_curr_get_info.offset = 0;
		mac_control->fifos[i].tx_curr_put_info.offset = 0;
	}
}
/**
 *  stop_nic - To stop the nic
 *  @nic : device private variable.
 *  Description:
 *  This function does exactly the opposite of what the start_nic()
 *  function does. This function is called to stop the device.
 *  Return Value:
 *  void.
 */

static void stop_nic(struct s2io_nic *nic)
{
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	u16 interruptible;
	mac_info_t *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* Disable all interrupts */
	interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
	interruptible |= TX_PIC_INTR | RX_PIC_INTR;
	interruptible |= TX_MAC_INTR | RX_MAC_INTR;
	en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);

	/* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~(ADAPTER_CNTL_EN);
	writeq(val64, &bar0->adapter_control);
}
static int fill_rxd_3buf(nic_t *nic, RxD_t *rxdp, struct sk_buff *skb)
{
	struct net_device *dev = nic->dev;
	struct sk_buff *frag_list;
	void *tmp;

	/* Buffer-1 receives L3/L4 headers */
	((RxD3_t*)rxdp)->Buffer1_ptr = pci_map_single
			(nic->pdev, skb->data, l3l4hdr_size + 4,
			PCI_DMA_FROMDEVICE);

	/* skb_shinfo(skb)->frag_list will have L4 data payload */
	skb_shinfo(skb)->frag_list = dev_alloc_skb(dev->mtu + ALIGN_SIZE);
	if (skb_shinfo(skb)->frag_list == NULL) {
		DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n", dev->name);
		return -ENOMEM;
	}
	frag_list = skb_shinfo(skb)->frag_list;
	frag_list->next = NULL;
	tmp = (void *)ALIGN((long)frag_list->data, ALIGN_SIZE + 1);
	frag_list->data = tmp;
	frag_list->tail = tmp;

	/* Buffer-2 receives L4 data payload */
	((RxD3_t*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev,
			frag_list->data, dev->mtu,
			PCI_DMA_FROMDEVICE);
	rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
	rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);

	return SUCCESS;
}
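
/*
 * Illustrative note (not from the original sources): in 3 buffer mode a
 * received frame ends up scattered as
 *
 *	Buffer-0: Ethernet header   (BUF0_LEN, mapped by fill_rx_buffers())
 *	Buffer-1: L3/L4 headers     (l3l4hdr_size + 4, skb->data)
 *	Buffer-2: L4 data payload   (dev->mtu, skb_shinfo(skb)->frag_list)
 *
 * fill_rxd_3buf() only maps Buffer-1 and Buffer-2; the Buffer-0 mapping is
 * done by its caller.
 */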
/**
 *  fill_rx_buffers - Allocates the Rx side skbs
 *  @nic:  device private variable
 *  @ring_no: ring number
 *  Description:
 *  The function allocates Rx side skbs and puts the physical
 *  address of these buffers into the RxD buffer pointers, so that the NIC
 *  can DMA the received frame into these locations.
 *  The NIC supports 3 receive modes, viz
 *  1. single buffer,
 *  2. three buffer and
 *  3. five buffer modes.
 *  Each mode defines how many fragments the received frame will be split
 *  up into by the NIC. The frame is split into L3 header, L4 header and
 *  L4 payload in three buffer mode, and in 5 buffer mode the L4 payload
 *  itself is split into 3 fragments. As of now only single buffer mode is
 *  supported.
 *  Return Value:
 *  SUCCESS on success or an appropriate -ve value on failure.
 */
static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
{
	struct net_device *dev = nic->dev;
	struct sk_buff *skb;
	RxD_t *rxdp;
	int off, off1, size, block_no, block_no1;
	u32 alloc_tab = 0;
	u32 alloc_cnt;
	mac_info_t *mac_control;
	struct config_param *config;
	u64 tmp;
	buffAdd_t *ba;
#ifndef CONFIG_S2IO_NAPI
	unsigned long flags;
#endif
	RxD_t *first_rxdp = NULL;

	mac_control = &nic->mac_control;
	config = &nic->config;
	alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
	    atomic_read(&nic->rx_bufs_left[ring_no]);

	block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
	off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
	while (alloc_tab < alloc_cnt) {
		block_no = mac_control->rings[ring_no].rx_curr_put_info.
		    block_index;
		off = mac_control->rings[ring_no].rx_curr_put_info.offset;

		rxdp = mac_control->rings[ring_no].
		    rx_blocks[block_no].rxds[off].virt_addr;

		if ((block_no == block_no1) && (off == off1) &&
		    (rxdp->Host_Control)) {
			DBG_PRINT(INTR_DBG, "%s: Get and Put",
				  dev->name);
			DBG_PRINT(INTR_DBG, " info equated\n");
			goto end;
		}
		if (off && (off == rxd_count[nic->rxd_mode])) {
			mac_control->rings[ring_no].rx_curr_put_info.
			    block_index++;
			if (mac_control->rings[ring_no].rx_curr_put_info.
			    block_index == mac_control->rings[ring_no].
			    block_count)
				mac_control->rings[ring_no].rx_curr_put_info.
				    block_index = 0;
			block_no = mac_control->rings[ring_no].
			    rx_curr_put_info.block_index;
			if (off == rxd_count[nic->rxd_mode])
				off = 0;
			mac_control->rings[ring_no].rx_curr_put_info.
			    offset = off;
			rxdp = mac_control->rings[ring_no].
			    rx_blocks[block_no].block_virt_addr;
			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
				  dev->name, rxdp);
		}
#ifndef CONFIG_S2IO_NAPI
		spin_lock_irqsave(&nic->put_lock, flags);
		mac_control->rings[ring_no].put_pos =
		    (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
		spin_unlock_irqrestore(&nic->put_lock, flags);
#endif
		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
		    ((nic->rxd_mode >= RXD_MODE_3A) &&
		     (rxdp->Control_2 & BIT(0)))) {
			mac_control->rings[ring_no].rx_curr_put_info.
			    offset = off;
			goto end;
		}
		/* calculate size of skb based on ring mode */
		size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
		    HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
		if (nic->rxd_mode == RXD_MODE_1)
			size += NET_IP_ALIGN;
		else if (nic->rxd_mode == RXD_MODE_3B)
			size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
		else
			size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;

		/* allocate skb */
		skb = dev_alloc_skb(size);
		if (!skb) {
			DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
			DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			return -ENOMEM;
		}
		if (nic->rxd_mode == RXD_MODE_1) {
			/* 1 buffer mode - normal operation mode */
			memset(rxdp, 0, sizeof(RxD1_t));
			skb_reserve(skb, NET_IP_ALIGN);
			((RxD1_t*)rxdp)->Buffer0_ptr = pci_map_single
			    (nic->pdev, skb->data, size - NET_IP_ALIGN,
			    PCI_DMA_FROMDEVICE);
			rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
		} else if (nic->rxd_mode >= RXD_MODE_3A) {
			/*
			 * 2 or 3 buffer mode -
			 * Both 2 buffer mode and 3 buffer mode provide 128
			 * byte aligned receive buffers.
			 *
			 * 3 buffer mode provides header separation, where
			 * skb->data will have the L3/L4 headers and
			 * skb_shinfo(skb)->frag_list will have the L4 data
			 * payload.
			 */
			memset(rxdp, 0, sizeof(RxD3_t));
			ba = &mac_control->rings[ring_no].ba[block_no][off];
			skb_reserve(skb, BUF0_LEN);
			tmp = (u64)(unsigned long) skb->data;
			tmp += ALIGN_SIZE;
			tmp &= ~ALIGN_SIZE;
			skb->data = (void *) (unsigned long)tmp;
			skb->tail = (void *) (unsigned long)tmp;

			((RxD3_t*)rxdp)->Buffer0_ptr =
			    pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
					   PCI_DMA_FROMDEVICE);
			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
			if (nic->rxd_mode == RXD_MODE_3B) {
				/* Two buffer mode */

				/*
				 * Buffer2 will have L3/L4 header plus
				 * L4 payload
				 */
				((RxD3_t*)rxdp)->Buffer2_ptr = pci_map_single
				    (nic->pdev, skb->data, dev->mtu + 4,
				    PCI_DMA_FROMDEVICE);

				/* Buffer-1 will be a dummy buffer, not used */
				((RxD3_t*)rxdp)->Buffer1_ptr =
				    pci_map_single(nic->pdev, ba->ba_1,
						   BUF1_LEN,
						   PCI_DMA_FROMDEVICE);
				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
				    (dev->mtu + 4);
			} else {
				/* 3 buffer mode */
				if (fill_rxd_3buf(nic, rxdp, skb) == -ENOMEM) {
					dev_kfree_skb_irq(skb);
					if (first_rxdp) {
						wmb();
						first_rxdp->Control_1 |=
						    RXD_OWN_XENA;
					}
					return -ENOMEM;
				}
			}
			rxdp->Control_2 |= BIT(0);
		}
		rxdp->Host_Control = (unsigned long) (skb);
		if (alloc_tab & ((1 << rxsync_frequency) - 1))
			rxdp->Control_1 |= RXD_OWN_XENA;
		off++;
		if (off == (rxd_count[nic->rxd_mode] + 1))
			off = 0;
		mac_control->rings[ring_no].rx_curr_put_info.offset = off;

		rxdp->Control_2 |= SET_RXD_MARKER;
		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			first_rxdp = rxdp;
		}
		atomic_inc(&nic->rx_bufs_left[ring_no]);
		alloc_tab++;
	}

end:
	/*
	 * Transfer ownership of first descriptor to adapter just before
	 * exiting. Before that, use memory barrier so that ownership
	 * and other fields are seen by adapter correctly.
	 */
	if (first_rxdp) {
		wmb();
		first_rxdp->Control_1 |= RXD_OWN_XENA;
	}

	return SUCCESS;
}
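
/*
 * Illustrative summary (not from the original sources): the skb size
 * computed in fill_rx_buffers() works out per ring mode to
 *
 *	RXD_MODE_1:  dev->mtu + Ethernet/LLC/SNAP headers + NET_IP_ALIGN
 *	RXD_MODE_3B: dev->mtu + ALIGN_SIZE + BUF0_LEN + 4
 *	3 buffer:    l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4
 *
 * The ALIGN_SIZE slack is what lets the 2/3 buffer modes round skb->data up
 * to a 128-byte boundary before mapping it for DMA, and the deferred
 * RXD_OWN_XENA hand-off behind wmb() keeps the adapter from ever seeing a
 * half-initialized descriptor.
 */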
static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
{
	struct net_device *dev = sp->dev;
	int j;
	struct sk_buff *skb;
	RxD_t *rxdp;
	mac_info_t *mac_control;
	buffAdd_t *ba;

	mac_control = &sp->mac_control;
	for (j = 0; j < rxd_count[sp->rxd_mode]; j++) {
		rxdp = mac_control->rings[ring_no].
		    rx_blocks[blk].rxds[j].virt_addr;
		skb = (struct sk_buff *)
		    ((unsigned long) rxdp->Host_Control);
		if (skb == NULL)
			continue;
		if (sp->rxd_mode == RXD_MODE_1) {
			pci_unmap_single(sp->pdev, (dma_addr_t)
			    ((RxD1_t*)rxdp)->Buffer0_ptr,
			    dev->mtu +
			    HEADER_ETHERNET_II_802_3_SIZE
			    + HEADER_802_2_SIZE +
			    HEADER_SNAP_SIZE,
			    PCI_DMA_FROMDEVICE);
			memset(rxdp, 0, sizeof(RxD1_t));
		} else if (sp->rxd_mode == RXD_MODE_3B) {
			ba = &mac_control->rings[ring_no].
			    ba[blk][j];
			pci_unmap_single(sp->pdev, (dma_addr_t)
			    ((RxD3_t*)rxdp)->Buffer0_ptr,
			    BUF0_LEN,
			    PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev, (dma_addr_t)
			    ((RxD3_t*)rxdp)->Buffer1_ptr,
			    BUF1_LEN,
			    PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev, (dma_addr_t)
			    ((RxD3_t*)rxdp)->Buffer2_ptr,
			    dev->mtu + 4,
			    PCI_DMA_FROMDEVICE);
			memset(rxdp, 0, sizeof(RxD3_t));
		} else {
			pci_unmap_single(sp->pdev, (dma_addr_t)
			    ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN,
			    PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev, (dma_addr_t)
			    ((RxD3_t*)rxdp)->Buffer1_ptr,
			    l3l4hdr_size + 4,
			    PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev, (dma_addr_t)
			    ((RxD3_t*)rxdp)->Buffer2_ptr, dev->mtu,
			    PCI_DMA_FROMDEVICE);
			memset(rxdp, 0, sizeof(RxD3_t));
		}
		dev_kfree_skb(skb);
		atomic_dec(&sp->rx_bufs_left[ring_no]);
	}
}
/**
 *  free_rx_buffers - Frees all Rx buffers
 *  @sp: device private variable.
 *  Description:
 *  This function will free all Rx buffers allocated by host.
 *  Return Value:
 *  NONE.
 */

static void free_rx_buffers(struct s2io_nic *sp)
{
	struct net_device *dev = sp->dev;
	int i, blk = 0, buf_cnt = 0;
	mac_info_t *mac_control;
	struct config_param *config;

	mac_control = &sp->mac_control;
	config = &sp->config;

	for (i = 0; i < config->rx_ring_num; i++) {
		for (blk = 0; blk < rx_ring_sz[i]; blk++)
			free_rxd_blk(sp, i, blk);

		mac_control->rings[i].rx_curr_put_info.block_index = 0;
		mac_control->rings[i].rx_curr_get_info.block_index = 0;
		mac_control->rings[i].rx_curr_put_info.offset = 0;
		mac_control->rings[i].rx_curr_get_info.offset = 0;
		atomic_set(&sp->rx_bufs_left[i], 0);
		DBG_PRINT(INIT_DBG, "%s: Freed 0x%x Rx Buffers on ring%d\n",
			  dev->name, buf_cnt, i);
	}
}
/**
 * s2io_poll - Rx interrupt handler for NAPI support
 * @dev : pointer to the device structure.
 * @budget : The number of packets that were budgeted to be processed
 * during one pass through the 'poll' function.
 * Description:
 * Comes into the picture only if NAPI support has been incorporated. It does
 * the same thing that rx_intr_handler does, but not in an interrupt context;
 * it will also process only a given number of packets.
 * Return value:
 * 0 on success and 1 if there are no Rx packets to be processed.
 */
#if defined(CONFIG_S2IO_NAPI)
static int s2io_poll(struct net_device *dev, int *budget)
{
	nic_t *nic = dev->priv;
	int pkt_cnt = 0, org_pkts_to_process;
	mac_info_t *mac_control;
	struct config_param *config;
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
	int i;

	atomic_inc(&nic->isr_cnt);
	mac_control = &nic->mac_control;
	config = &nic->config;

	nic->pkts_to_process = *budget;
	if (nic->pkts_to_process > dev->quota)
		nic->pkts_to_process = dev->quota;
	org_pkts_to_process = nic->pkts_to_process;

	writeq(val64, &bar0->rx_traffic_int);
	val64 = readl(&bar0->rx_traffic_int);

	for (i = 0; i < config->rx_ring_num; i++) {
		rx_intr_handler(&mac_control->rings[i]);
		pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
		if (!nic->pkts_to_process) {
			/* Quota for the current iteration has been met */
			goto no_rx;
		}
	}
	if (!pkt_cnt)
		pkt_cnt = 1;
	dev->quota -= pkt_cnt;
	*budget -= pkt_cnt;
	netif_rx_complete(dev);

	for (i = 0; i < config->rx_ring_num; i++) {
		if (fill_rx_buffers(nic, i) == -ENOMEM) {
			DBG_PRINT(ERR_DBG, "%s: Out of memory", dev->name);
			DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
			break;
		}
	}
	/* Re-enable the Rx interrupts. */
	writeq(0x0, &bar0->rx_traffic_mask);
	val64 = readl(&bar0->rx_traffic_mask);
	atomic_dec(&nic->isr_cnt);
	return 0;

no_rx:
	dev->quota -= pkt_cnt;
	*budget -= pkt_cnt;

	for (i = 0; i < config->rx_ring_num; i++) {
		if (fill_rx_buffers(nic, i) == -ENOMEM) {
			DBG_PRINT(ERR_DBG, "%s: Out of memory", dev->name);
			DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
			break;
		}
	}
	atomic_dec(&nic->isr_cnt);
	return 1;
}
#endif
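
/*
 * Illustrative note (not from the original sources): with the old
 * netdev->poll() NAPI API used here, returning 0 after netif_rx_complete()
 * tells the core the quota was not exhausted and Rx interrupts have been
 * re-armed, while returning 1 from the no_rx path keeps the device on the
 * poll list so s2io_poll() is invoked again.
 */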
/**
 * s2io_netpoll - Rx interrupt service handler for netpoll support
 * @dev : pointer to the device structure.
 * Description:
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
#ifdef CONFIG_NET_POLL_CONTROLLER
static void s2io_netpoll(struct net_device *dev)
{
	nic_t *nic = dev->priv;
	mac_info_t *mac_control;
	struct config_param *config;
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	u64 val64;
	int i;

	disable_irq(dev->irq);

	atomic_inc(&nic->isr_cnt);
	mac_control = &nic->mac_control;
	config = &nic->config;

	val64 = readq(&bar0->rx_traffic_int);
	writeq(val64, &bar0->rx_traffic_int);

	for (i = 0; i < config->rx_ring_num; i++)
		rx_intr_handler(&mac_control->rings[i]);

	for (i = 0; i < config->rx_ring_num; i++) {
		if (fill_rx_buffers(nic, i) == -ENOMEM) {
			DBG_PRINT(ERR_DBG, "%s: Out of memory", dev->name);
			DBG_PRINT(ERR_DBG, " in Rx Netpoll!!\n");
			break;
		}
	}
	atomic_dec(&nic->isr_cnt);
	enable_irq(dev->irq);
}
#endif
/**
 *  rx_intr_handler - Rx interrupt handler
 *  @nic: device private variable.
 *  Description:
 *  If the interrupt is because of a received frame or if the
 *  receive ring contains fresh, as yet un-processed frames, this function
 *  is called. It picks out the RxD at which place the last Rx processing
 *  had stopped and sends the skb to the OSM's Rx handler and then
 *  increments the offset.
 *  Return Value:
 *  NONE.
 */
static void rx_intr_handler(ring_info_t *ring_data)
{
	nic_t *nic = ring_data->nic;
	struct net_device *dev = (struct net_device *) nic->dev;
	int get_block, put_block, put_offset;
	rx_curr_get_info_t get_info, put_info;
	RxD_t *rxdp;
	struct sk_buff *skb;
#ifndef CONFIG_S2IO_NAPI
	int pkt_cnt = 0;
#endif
	int i;

	spin_lock(&nic->rx_lock);
	if (atomic_read(&nic->card_state) == CARD_DOWN) {
		DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n",
			  __FUNCTION__, dev->name);
		spin_unlock(&nic->rx_lock);
		return;
	}

	get_info = ring_data->rx_curr_get_info;
	get_block = get_info.block_index;
	put_info = ring_data->rx_curr_put_info;
	put_block = put_info.block_index;
	rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
#ifndef CONFIG_S2IO_NAPI
	spin_lock(&nic->put_lock);
	put_offset = ring_data->put_pos;
	spin_unlock(&nic->put_lock);
#else
	put_offset = (put_block * (rxd_count[nic->rxd_mode] + 1)) +
	    put_info.offset;
#endif
	while (RXD_IS_UP2DT(rxdp)) {
		/* If we are next to the put index, it's a ring-full condition */
		if ((get_block == put_block) &&
		    (get_info.offset + 1) == put_info.offset) {
			DBG_PRINT(ERR_DBG, "%s: Ring Full\n", dev->name);
			break;
		}
		skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
		if (skb == NULL) {
			DBG_PRINT(ERR_DBG, "%s: The skb is ",
				  dev->name);
			DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
			spin_unlock(&nic->rx_lock);
			return;
		}
		if (nic->rxd_mode == RXD_MODE_1) {
			pci_unmap_single(nic->pdev, (dma_addr_t)
			    ((RxD1_t*)rxdp)->Buffer0_ptr,
			    dev->mtu +
			    HEADER_ETHERNET_II_802_3_SIZE +
			    HEADER_802_2_SIZE +
			    HEADER_SNAP_SIZE,
			    PCI_DMA_FROMDEVICE);
		} else if (nic->rxd_mode == RXD_MODE_3B) {
			pci_unmap_single(nic->pdev, (dma_addr_t)
			    ((RxD3_t*)rxdp)->Buffer0_ptr,
			    BUF0_LEN, PCI_DMA_FROMDEVICE);
			pci_unmap_single(nic->pdev, (dma_addr_t)
			    ((RxD3_t*)rxdp)->Buffer1_ptr,
			    BUF1_LEN, PCI_DMA_FROMDEVICE);
			pci_unmap_single(nic->pdev, (dma_addr_t)
			    ((RxD3_t*)rxdp)->Buffer2_ptr,
			    dev->mtu + 4,
			    PCI_DMA_FROMDEVICE);
		} else {
			pci_unmap_single(nic->pdev, (dma_addr_t)
			    ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN,
			    PCI_DMA_FROMDEVICE);
			pci_unmap_single(nic->pdev, (dma_addr_t)
			    ((RxD3_t*)rxdp)->Buffer1_ptr,
			    l3l4hdr_size + 4,
			    PCI_DMA_FROMDEVICE);
			pci_unmap_single(nic->pdev, (dma_addr_t)
			    ((RxD3_t*)rxdp)->Buffer2_ptr,
			    dev->mtu, PCI_DMA_FROMDEVICE);
		}
		prefetch(skb->data);
		rx_osm_handler(ring_data, rxdp);
		get_info.offset++;
		ring_data->rx_curr_get_info.offset = get_info.offset;
		rxdp = ring_data->rx_blocks[get_block].
		    rxds[get_info.offset].virt_addr;
		if (get_info.offset == rxd_count[nic->rxd_mode]) {
			get_info.offset = 0;
			ring_data->rx_curr_get_info.offset = get_info.offset;
			get_block++;
			if (get_block == ring_data->block_count)
				get_block = 0;
			ring_data->rx_curr_get_info.block_index = get_block;
			rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
		}

#ifdef CONFIG_S2IO_NAPI
		nic->pkts_to_process -= 1;
		if (!nic->pkts_to_process)
			break;
#else
		pkt_cnt++;
		if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
			break;
#endif
	}
	if (nic->lro) {
		/* Clear all LRO sessions before exiting */
		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
			lro_t *lro = &nic->lro0_n[i];
			if (lro->in_use) {
				update_L3L4_header(nic, lro);
				queue_rx_frame(lro->parent);
				clear_lro_session(lro);
			}
		}
	}

	spin_unlock(&nic->rx_lock);
}
/**
 *  tx_intr_handler - Transmit interrupt handler
 *  @nic : device private variable
 *  Description:
 *  If an interrupt was raised to indicate DMA complete of the
 *  Tx packet, this function is called. It identifies the last TxD
 *  whose buffer was freed and frees all skbs whose data have already
 *  been DMA'ed into the NIC's internal memory.
 *  Return Value:
 *  NONE
 */
static void tx_intr_handler(fifo_info_t *fifo_data)
{
	nic_t *nic = fifo_data->nic;
	struct net_device *dev = (struct net_device *) nic->dev;
	tx_curr_get_info_t get_info, put_info;
	struct sk_buff *skb;
	TxD_t *txdlp;

	get_info = fifo_data->tx_curr_get_info;
	put_info = fifo_data->tx_curr_put_info;
	txdlp = (TxD_t *) fifo_data->list_info[get_info.offset].
	    list_virt_addr;
	while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
	       (get_info.offset != put_info.offset) &&
	       (txdlp->Host_Control)) {
		/* Check for TxD errors */
		if (txdlp->Control_1 & TXD_T_CODE) {
			unsigned long long err;
			err = txdlp->Control_1 & TXD_T_CODE;
			if (err & 0x1) {
				nic->mac_control.stats_info->sw_stat.
				    parity_err_cnt++;
			}
			if ((err >> 48) == 0xA) {
				DBG_PRINT(TX_DBG, "TxD returned due "
					  "to loss of link\n");
			} else {
				DBG_PRINT(ERR_DBG, "***TxD error %llx\n", err);
			}
		}
		skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
		if (skb == NULL) {
			DBG_PRINT(ERR_DBG, "%s: Null skb ",
				  __FUNCTION__);
			DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
			return;
		}

		/* Updating the statistics block */
		nic->stats.tx_bytes += skb->len;
		dev_kfree_skb_irq(skb);

		get_info.offset++;
		if (get_info.offset == get_info.fifo_len + 1)
			get_info.offset = 0;
		txdlp = (TxD_t *) fifo_data->list_info
		    [get_info.offset].list_virt_addr;
		fifo_data->tx_curr_get_info.offset =
		    get_info.offset;
	}

	spin_lock(&nic->tx_lock);
	if (netif_queue_stopped(dev))
		netif_wake_queue(dev);
	spin_unlock(&nic->tx_lock);
}
/**
 *  s2io_mdio_write - Function to write into the MDIO registers
 *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
 *  @addr     : address value
 *  @value    : data value
 *  @dev      : pointer to net_device structure
 *  Description:
 *  This function is used to write values to the MDIO registers.
 *  Return Value:
 *  NONE
 */
static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
{
	u64 val64 = 0x0;
	nic_t *sp = dev->priv;
	XENA_dev_config_t *bar0 = (XENA_dev_config_t *)sp->bar0;

	/* Address transaction */
	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
	    | MDIO_MMD_DEV_ADDR(mmd_type)
	    | MDIO_MMS_PRT_ADDR(0x0);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* Data transaction */
	val64 = 0x0;
	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
	    | MDIO_MMD_DEV_ADDR(mmd_type)
	    | MDIO_MMS_PRT_ADDR(0x0)
	    | MDIO_MDIO_DATA(value)
	    | MDIO_OP(MDIO_OP_WRITE_TRANS);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* Follow-up read transaction */
	val64 = 0x0;
	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
	    | MDIO_MMD_DEV_ADDR(mmd_type)
	    | MDIO_MMS_PRT_ADDR(0x0)
	    | MDIO_OP(MDIO_OP_READ_TRANS);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);
}
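
/*
 * Illustrative usage (a sketch mirroring s2io_updt_xpak_counter() below;
 * the 0xA100 DOM register address is an assumption, not taken verbatim
 * from this file):
 *
 *	s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, 0xA100, 0x0, dev);
 *	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, 0xA100, dev);
 */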
/**
 *  s2io_mdio_read - Function to read from the MDIO registers
 *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
 *  @addr     : address value
 *  @dev      : pointer to net_device structure
 *  Description:
 *  This function is used to read values from the MDIO registers.
 *  Return Value:
 *  The 16-bit value read from the register.
 */
static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
{
	u64 val64 = 0x0;
	u64 rval64 = 0x0;
	nic_t *sp = dev->priv;
	XENA_dev_config_t *bar0 = (XENA_dev_config_t *)sp->bar0;

	/* Address transaction */
	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
	    | MDIO_MMD_DEV_ADDR(mmd_type)
	    | MDIO_MMS_PRT_ADDR(0x0);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* Data transaction */
	val64 = 0x0;
	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
	    | MDIO_MMD_DEV_ADDR(mmd_type)
	    | MDIO_MMS_PRT_ADDR(0x0)
	    | MDIO_OP(MDIO_OP_READ_TRANS);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* Read the value from the regs */
	rval64 = readq(&bar0->mdio_control);
	rval64 = rval64 & 0xFFFF0000;
	rval64 = rval64 >> 16;
	return rval64;
}
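
/*
 * Illustrative note (not from the original sources): both MDIO helpers use
 * the same two-phase pattern; an address transaction first selects the
 * register, then a data transaction moves the value, and each phase is
 * kicked off with MDIO_CTRL_START_TRANS(0xE) and given time to complete.
 * The read data comes back in bits 31:16 of mdio_control, hence the
 * 0xFFFF0000 mask and 16-bit shift above.
 */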
/**
 *  s2io_chk_xpak_counter - Function to check the status of the xpak counters
 *  @counter   : counter value to be updated
 *  @regs_stat : packed 2-bit status fields for the counters
 *  @index     : index of this counter within @regs_stat
 *  @flag      : flag to indicate the status
 *  @type      : counter type
 *  Description:
 *  This function checks the status of the xpak counters value.
 *  Return Value:
 *  NONE
 */
static void s2io_chk_xpak_counter(u64 *counter, u64 *regs_stat, u32 index, u16 flag, u16 type)
{
	u64 mask = 0x3;
	u64 val64;
	int i;

	for (i = 0; i < index; i++)
		mask = mask << 0x2;

	if (flag > 0) {
		*counter = *counter + 1;
		val64 = *regs_stat & mask;
		val64 = val64 >> (index * 0x2);
		val64 = val64 + 1;
		if (val64 == 3) {
			switch (type) {
			case 1:
				DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
					  "service. Excessive temperatures may "
					  "result in premature transceiver "
					  "failure\n");
				break;
			case 2:
				DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
					  "service. Excessive bias currents may "
					  "indicate imminent laser diode "
					  "failure\n");
				break;
			case 3:
				DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
					  "service. Excessive laser output "
					  "power may saturate far-end "
					  "receiver\n");
				break;
			default:
				DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
					  "type\n");
			}
			val64 = 0x0;
		}
		val64 = val64 << (index * 0x2);
		*regs_stat = (*regs_stat & (~mask)) | (val64);
	} else {
		*regs_stat = *regs_stat & (~mask);
	}
}
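
/*
 * Illustrative example (not from the original sources): each counter owns a
 * 2-bit saturating field inside *regs_stat, selected by @index. With
 * index == 2 the mask is 0x3 << 4; three consecutive calls with @flag set
 * advance the field 1 -> 2 -> 3, the third call prints the "take NIC out of
 * service" warning and resets the field to 0, while any call with @flag
 * clear zeroes the field immediately.
 */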
/**
 *  s2io_updt_xpak_counter - Function to update the xpak counters
 *  @dev : pointer to net_device struct
 *  Description:
 *  This function updates the status of the xpak counters value.
 *  Return Value:
 *  NONE
 */
static void s2io_updt_xpak_counter(struct net_device *dev)
{
	u16 flag = 0x0;
	u16 type = 0x0;
	u16 val16 = 0x0;
	u64 val64 = 0x0;
	u64 addr = 0x0;

	nic_t *sp = dev->priv;
	StatInfo_t *stat_info = sp->mac_control.stats_info;
	/* Check the communication with the MDIO slave */
	addr = 0x0000;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
	if ((val64 == 0xFFFF) || (val64 == 0x0000)) {
		DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
			  "Returned %llx\n", (unsigned long long)val64);
		return;
	}

	/* Check for the expected value of 0x2040 at PMA address 0x0000 */
	if (val64 != 0x2040) {
		DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
		DBG_PRINT(ERR_DBG, "Returned: %llx - Expected: 0x2040\n",
			  (unsigned long long)val64);
		return;
	}
	/* Loading the DOM register to MDIO register */
	addr = 0xA100;
	s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	/* Reading the Alarm flags */
	addr = 0xA070;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	flag = CHECKBIT(val64, 0x7);
	type = 1;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
			      &stat_info->xpak_stat.xpak_regs_stat,
			      0x0, flag, type);

	if (CHECKBIT(val64, 0x6))
		stat_info->xpak_stat.alarm_transceiver_temp_low++;

	flag = CHECKBIT(val64, 0x3);
	type = 2;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
			      &stat_info->xpak_stat.xpak_regs_stat,
			      0x2, flag, type);

	if (CHECKBIT(val64, 0x2))
		stat_info->xpak_stat.alarm_laser_bias_current_low++;

	flag = CHECKBIT(val64, 0x1);
	type = 3;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
			      &stat_info->xpak_stat.xpak_regs_stat,
			      0x4, flag, type);

	if (CHECKBIT(val64, 0x0))
		stat_info->xpak_stat.alarm_laser_output_power_low++;
	/* Reading the Warning flags */
	addr = 0xA074;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	if (CHECKBIT(val64, 0x7))
		stat_info->xpak_stat.warn_transceiver_temp_high++;

	if (CHECKBIT(val64, 0x6))
		stat_info->xpak_stat.warn_transceiver_temp_low++;