1 /************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3 * Copyright(c) 2002-2005 Neterion Inc.
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
* questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
21 * Francois Romieu : For pointing out all code part that were
22 * deprecated and also styling related comments.
23 * Grant Grundler : For helping me get rid of some Architecture
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
27 * The module loadable parameters that are supported by the driver and a brief
* explanation of all the variables.
30 * rx_ring_num : This can be used to program the number of receive rings used
32 * rx_ring_sz: This defines the number of receive blocks each ring can have.
33 * This is also an array of size 8.
34 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35 * values are 1, 2 and 3.
* tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
37 * tx_fifo_len: This too is an array of 8. Each element defines the number of
38 * Tx descriptors that can be associated with each corresponding FIFO.
39 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40 * 1(MSI), 2(MSI_X). Default value is '0(INTA)'
41 * lro: Specifies whether to enable Large Receive Offload (LRO) or not.
42 * Possible values '1' for enable '0' for disable. Default is '0'
43 * lro_max_pkts: This parameter defines maximum number of packets can be
44 * aggregated as a single large packet
45 ************************************************************************/
47 #include <linux/module.h>
48 #include <linux/types.h>
49 #include <linux/errno.h>
50 #include <linux/ioport.h>
51 #include <linux/pci.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/kernel.h>
54 #include <linux/netdevice.h>
55 #include <linux/etherdevice.h>
56 #include <linux/skbuff.h>
57 #include <linux/init.h>
58 #include <linux/delay.h>
59 #include <linux/stddef.h>
60 #include <linux/ioctl.h>
61 #include <linux/timex.h>
62 #include <linux/sched.h>
63 #include <linux/ethtool.h>
64 #include <linux/workqueue.h>
65 #include <linux/if_vlan.h>
67 #include <linux/tcp.h>
70 #include <asm/system.h>
71 #include <asm/uaccess.h>
73 #include <asm/div64.h>
78 #include "s2io-regs.h"
/* Driver release version string, also exported via MODULE_VERSION below. */
#define DRV_VERSION "2.0.15.2"

/* S2io Driver name & version. */
static char s2io_driver_name[] = "Neterion";
static char s2io_driver_version[] = DRV_VERSION;

/*
 * Per-Rx-descriptor-mode tables, indexed by nic->rxd_mode:
 * rxd_size  - bytes occupied by one RxD in that mode,
 * rxd_count - number of usable RxDs per Rx block in that mode.
 */
static int rxd_size[4] = {32,48,48,64};
static int rxd_count[4] = {127,85,85,63};
/*
 * RXD_IS_UP2DT - check whether a Rx descriptor has been handed back to
 * the host: the adapter no longer owns it (RXD_OWN_XENA clear) and the
 * marker field no longer carries the device-owned pattern.
 * NOTE(review): braces and the return statement are not visible in this
 * view — the body appears truncated; restore from the original source.
 */
static inline int RXD_IS_UP2DT(RxD_t *rxdp)
	ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
	       (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
/*
 * Cards with following subsystem_id have a link state indication
 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
 * macro below identifies these cards given the subsystem_id.
 * Only Xframe-I devices are affected; evaluates to 1 for the faulty
 * subsystem-id ranges, 0 otherwise.
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
	(dev_type == XFRAME_I_DEVICE) ? \
		((((subid >= 0x600B) && (subid <= 0x600D)) || \
		  ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0

/* Link is up when neither the remote nor the local RMAC fault bit is set. */
#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
				      ADAPTER_STATUS_RMAC_LOCAL_FAULT)))

/* Atomically claim the tasklet; non-zero result means it was already taken. */
#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
/*
 * rx_buffer_level - classify how full ring @ring's Rx buffer pool is,
 * given @rxb_size buffers currently posted (compared against the
 * per-mode block capacity and the ring's packet count).
 * NOTE(review): braces and the return statements for each level are not
 * visible in this view — the body appears truncated.
 */
static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
	mac_info_t *mac_control;

	mac_control = &sp->mac_control;
	if (rxb_size <= rxd_count[sp->rxd_mode])
	else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16)
/* Ethtool related variables and Macros. */

/*
 * Names of the ethtool self-test cases; the (offline)/(online) suffix
 * tells the user which test mode each entry requires.
 * NOTE(review): the closing "};" of this array is not visible in this view.
 */
static char s2io_gstrings[][ETH_GSTRING_LEN] = {
	"Register test\t(offline)",
	"Eeprom test\t(offline)",
	"Link test\t(online)",
	"RLDRAM test\t(offline)",
	"BIST Test\t(offline)"
/*
 * Names of the statistics reported through ethtool -S, in the order the
 * values are filled in by the stats-gathering code.  The tmac_*/rmac_*
 * entries mirror hardware MAC counters; the entries after the
 * "DRIVER STATISTICS" banner are software-maintained.
 * NOTE(review): several entries and the closing "};" are not visible in
 * this view — the array appears truncated.
 */
static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
	{"tmac_data_octets"},
	{"tmac_pause_ctrl_frms"},
	{"tmac_any_err_frms"},
	{"tmac_ttl_less_fb_octets"},
	{"tmac_vld_ip_octets"},
	{"rmac_data_octets"},
	{"rmac_fcs_err_frms"},
	{"rmac_vld_mcst_frms"},
	{"rmac_vld_bcst_frms"},
	{"rmac_in_rng_len_err_frms"},
	{"rmac_out_rng_len_err_frms"},
	{"rmac_pause_ctrl_frms"},
	{"rmac_unsup_ctrl_frms"},
	{"rmac_accepted_ucst_frms"},
	{"rmac_accepted_nucst_frms"},
	{"rmac_discarded_frms"},
	{"rmac_drop_events"},
	{"rmac_ttl_less_fb_octets"},
	{"rmac_usized_frms"},
	{"rmac_osized_frms"},
	{"rmac_jabber_frms"},
	{"rmac_ttl_64_frms"},
	{"rmac_ttl_65_127_frms"},
	{"rmac_ttl_128_255_frms"},
	{"rmac_ttl_256_511_frms"},
	{"rmac_ttl_512_1023_frms"},
	{"rmac_ttl_1024_1518_frms"},
	{"rmac_err_drp_udp"},
	{"rmac_xgmii_err_sym"},
	{"rmac_xgmii_data_err_cnt"},
	{"rmac_xgmii_ctrl_err_cnt"},
	{"rmac_accepted_ip"},
	{"new_rd_req_rtry_cnt"},
	{"wr_rtry_rd_ack_cnt"},
	{"new_wr_req_rtry_cnt"},
	{"rd_rtry_wr_ack_cnt"},
	{"rmac_ttl_1519_4095_frms"},
	{"rmac_ttl_4096_8191_frms"},
	{"rmac_ttl_8192_max_frms"},
	{"rmac_ttl_gt_max_frms"},
	{"rmac_osized_alt_frms"},
	{"rmac_jabber_alt_frms"},
	{"rmac_gt_max_alt_frms"},
	{"rmac_len_discard"},
	{"rmac_fcs_discard"},
	{"rmac_red_discard"},
	{"rmac_rts_discard"},
	{"rmac_ingm_full_discard"},
	{"\n DRIVER STATISTICS"},
	{"single_bit_ecc_errs"},
	{"double_bit_ecc_errs"},
	("alarm_transceiver_temp_high"),
	("alarm_transceiver_temp_low"),
	("alarm_laser_bias_current_high"),
	("alarm_laser_bias_current_low"),
	("alarm_laser_output_power_high"),
	("alarm_laser_output_power_low"),
	("warn_transceiver_temp_high"),
	("warn_transceiver_temp_low"),
	("warn_laser_bias_current_high"),
	("warn_laser_bias_current_low"),
	("warn_laser_output_power_high"),
	("warn_laser_output_power_low"),
	("lro_aggregated_pkts"),
	("lro_flush_both_count"),
	("lro_out_of_sequence_pkts"),
	("lro_flush_due_to_max_pkts"),
	("lro_avg_aggr_pkts"),
/*
 * Sizing helpers for the ethtool statistics and self-test string tables.
 * Each expansion is fully parenthesized so the macros group correctly
 * when used inside larger expressions (the previous unparenthesized
 * forms could misgroup under surrounding operators, e.g.
 * `x / S2IO_STAT_STRINGS_LEN` divided by S2IO_STAT_LEN only).
 */
#define S2IO_STAT_LEN (sizeof(ethtool_stats_keys) / ETH_GSTRING_LEN)
#define S2IO_STAT_STRINGS_LEN (S2IO_STAT_LEN * ETH_GSTRING_LEN)

#define S2IO_TEST_LEN (sizeof(s2io_gstrings) / ETH_GSTRING_LEN)
#define S2IO_STRINGS_LEN (S2IO_TEST_LEN * ETH_GSTRING_LEN)
/*
 * S2IO_TIMER_CONF - initialise @timer to call @handle(@arg) and arm it
 * to expire @exp jiffies from now.
 * NOTE(review): multi-statement macro without a do { } while (0)
 * wrapper, so it is unsafe inside an unbraced if/else body; the final
 * continuation line also appears truncated in this view — confirm
 * against the original source.
 */
#define S2IO_TIMER_CONF(timer, handle, arg, exp) \
	init_timer(&timer); \
	timer.function = handle; \
	timer.data = (unsigned long) arg; \
	mod_timer(&timer, (jiffies + exp)) \
/*
 * s2io_vlan_rx_register - net_device VLAN hook: record the VLAN group
 * for this device, serialised against the Tx path by tx_lock.
 * NOTE(review): braces, the 'flags' declaration and the assignment that
 * stores @grp are not visible in this view — the body appears truncated.
 */
static void s2io_vlan_rx_register(struct net_device *dev,
				  struct vlan_group *grp)
	nic_t *nic = dev->priv;

	spin_lock_irqsave(&nic->tx_lock, flags);
	spin_unlock_irqrestore(&nic->tx_lock, flags);
/* Unregister the vlan */
/*
 * s2io_vlan_rx_kill_vid - drop the device entry for VLAN id @vid under
 * tx_lock so the Tx path never sees a half-cleared entry.
 * NOTE(review): braces and the 'flags' declaration are not visible in
 * this view — the body appears truncated.
 */
static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
	nic_t *nic = dev->priv;

	spin_lock_irqsave(&nic->tx_lock, flags);
	nic->vlgrp->vlan_devices[vid] = NULL;
	spin_unlock_irqrestore(&nic->tx_lock, flags);
/*
 * Constants to be programmed into the Xena's registers, to configure
 * the transceiver via DTX control writes (value pairs written to
 * bar0->dtx_control in init_nic()).
 * NOTE(review): the END_SIGN terminator entries and the closing "};" of
 * these three tables are not visible in this view.
 */
static const u64 herc_act_dtx_cfg[] = {
	0x8000051536750000ULL, 0x80000515367500E0ULL,
	0x8000051536750004ULL, 0x80000515367500E4ULL,
	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
	0x801205150D440000ULL, 0x801205150D4400E0ULL,
	0x801205150D440004ULL, 0x801205150D4400E4ULL,
	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,

/* Equivalent DTX configuration for the original Xframe-I (Xena) device. */
static const u64 xena_dtx_cfg[] = {
	0x8000051500000000ULL, 0x80000515000000E0ULL,
	0x80000515D9350004ULL, 0x80000515D93500E4ULL,
	0x8001051500000000ULL, 0x80010515000000E0ULL,
	0x80010515001E0004ULL, 0x80010515001E00E4ULL,
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,

/*
 * Constants for Fixing the MacAddress problem seen mostly on
 * some platforms — NOTE(review): the original comment is truncated in
 * this view; confirm the affected platform list against the source.
 */
static const u64 fix_mac[] = {
	0x0060000000000000ULL, 0x0060600000000000ULL,
	0x0040600000000000ULL, 0x0000600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0000600000000000ULL,
	0x0040600000000000ULL, 0x0060600000000000ULL,
/* Module metadata exposed through modinfo. */
MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
/* Module Loadable parameters. */
S2IO_PARM_INT(tx_fifo_num, 1);
S2IO_PARM_INT(rx_ring_num, 1);
S2IO_PARM_INT(rx_ring_mode, 1);
S2IO_PARM_INT(use_continuous_tx_intrs, 1);
S2IO_PARM_INT(rmac_pause_time, 0x100);
S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
S2IO_PARM_INT(shared_splits, 0);
S2IO_PARM_INT(tmac_util_period, 5);
S2IO_PARM_INT(rmac_util_period, 5);
S2IO_PARM_INT(bimodal, 0);
S2IO_PARM_INT(l3l4hdr_size, 128);
/* Frequency of Rx desc syncs expressed as power of 2 */
S2IO_PARM_INT(rxsync_frequency, 3);
/* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */
S2IO_PARM_INT(intr_type, 0);
/* Large receive offload feature */
S2IO_PARM_INT(lro, 0);
/* Max pkts to be aggregated by LRO at one time. If not specified,
 * aggregation happens until we hit max IP pkt size(64K)
 */
S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
S2IO_PARM_INT(indicate_max_pkts, 0);
S2IO_PARM_INT(napi, 1);
S2IO_PARM_INT(ufo, 0);

/* Array-valued module parameters: per-FIFO Tx lengths and per-ring Rx
 * sizes / frame lengths, with compile-time defaults below. */
static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
    {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
static unsigned int rts_frm_len[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = 0 };

module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);
/*
 * This table lists all the devices that this driver supports.
 * NOTE(review): the all-zero terminator entry and the closing "};" are
 * not visible in this view.
 */
static struct pci_device_id s2io_tbl[] __devinitdata = {
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},

MODULE_DEVICE_TABLE(pci, s2io_tbl);
/*
 * PCI driver registration glue: binds the id table above to the
 * probe/remove entry points.
 * NOTE(review): the .name member and the closing "};" are not visible
 * in this view.
 */
static struct pci_driver s2io_driver = {
	.id_table = s2io_tbl,
	.probe = s2io_init_nic,
	.remove = __devexit_p(s2io_rem_nic),
/* A simplifier macro used both by init and free shared_mem Fns(). */
/*
 * TXD_MEM_PAGE_CNT - number of pages needed to hold @len items at
 * @per_each items per page, i.e. ceiling division.  Both arguments are
 * parenthesized so that expression arguments (e.g. `a + b`) group
 * correctly — the previous form divided by an unparenthesized
 * `per_each`, which mis-evaluated for compound divisors.
 */
#define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
/**
 * init_shared_mem - Allocation and Initialization of Memory
 * @nic: Device private variable.
 * Description: The function allocates all the memory areas shared
 * between the NIC and the driver. This includes Tx descriptors,
 * Rx descriptors and the statistics block.
 *
 * NOTE(review): large parts of this function (braces, error returns,
 * several statements, and declarations such as 'size', 'tmp_v', 'tmp_p',
 * 'k', 'l', 'ba' and 'tmp') are not visible in this view — restore from
 * the original source before building.
 */
static int init_shared_mem(struct s2io_nic *nic)
	void *tmp_v_addr, *tmp_v_addr_next;
	dma_addr_t tmp_p_addr, tmp_p_addr_next;
	RxD_block_t *pre_rxd_blk = NULL;
	int i, j, blk_cnt, rx_sz, tx_sz;
	int lst_size, lst_per_page;
	struct net_device *dev = nic->dev;
	mac_info_t *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* Allocation and initialization of TXDLs in FIFOs */
	/* Sum the requested descriptors over all FIFOs and reject
	 * configurations exceeding the hardware maximum. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		size += config->tx_cfg[i].fifo_len;
	if (size > MAX_AVAILABLE_TXDS) {
		DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
		DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);

	lst_size = (sizeof(TxD_t) * config->max_txds);
	tx_sz = lst_size * size;
	lst_per_page = PAGE_SIZE / lst_size;

	/* Per-FIFO bookkeeping array for the TxD lists. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		int fifo_len = config->tx_cfg[i].fifo_len;
		int list_holder_size = fifo_len * sizeof(list_info_hold_t);
		mac_control->fifos[i].list_info = kmalloc(list_holder_size,
		if (!mac_control->fifos[i].list_info) {
			"Malloc failed for list_info\n");
		memset(mac_control->fifos[i].list_info, 0, list_holder_size);
	/* Allocate DMA-coherent pages for the TxD lists of each FIFO. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
		mac_control->fifos[i].tx_curr_put_info.offset = 0;
		mac_control->fifos[i].tx_curr_put_info.fifo_len =
			config->tx_cfg[i].fifo_len - 1;
		mac_control->fifos[i].tx_curr_get_info.offset = 0;
		mac_control->fifos[i].tx_curr_get_info.fifo_len =
			config->tx_cfg[i].fifo_len - 1;
		mac_control->fifos[i].fifo_no = i;
		mac_control->fifos[i].nic = nic;
		mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;

		for (j = 0; j < page_num; j++) {
			tmp_v = pci_alloc_consistent(nic->pdev,
				"pci_alloc_consistent ");
			DBG_PRINT(ERR_DBG, "failed for TxDL\n");
			/* If we got a zero DMA address(can happen on
			 * certain platforms like PPC), reallocate.
			 * Store virtual address of page we don't want,
			 */
			mac_control->zerodma_virt_addr = tmp_v;
				"%s: Zero DMA address for TxDL. ", dev->name);
				"Virtual address %p\n", tmp_v);
			tmp_v = pci_alloc_consistent(nic->pdev,
				"pci_alloc_consistent ");
			DBG_PRINT(ERR_DBG, "failed for TxDL\n");
			/* Carve the page into TxD lists and record each
			 * list's virtual and bus addresses. */
			while (k < lst_per_page) {
				int l = (j * lst_per_page) + k;
				if (l == config->tx_cfg[i].fifo_len)
				mac_control->fifos[i].list_info[l].list_virt_addr =
					tmp_v + (k * lst_size);
				mac_control->fifos[i].list_info[l].list_phy_addr =
					tmp_p + (k * lst_size);

	/* Scratch area used by the UFO (UDP fragmentation offload) path. */
	nic->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
	if (!nic->ufo_in_band_v)

	/* Allocation and initialization of RXDs in Rings */
	for (i = 0; i < config->rx_ring_num; i++) {
		if (config->rx_cfg[i].num_rxd %
		    (rxd_count[nic->rxd_mode] + 1)) {
			DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
			DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
			DBG_PRINT(ERR_DBG, "RxDs per Block");
		size += config->rx_cfg[i].num_rxd;
		mac_control->rings[i].block_count =
			config->rx_cfg[i].num_rxd /
			(rxd_count[nic->rxd_mode] + 1 );
		mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
			mac_control->rings[i].block_count;
	/* Total RxD memory depends on the descriptor flavour in use. */
	if (nic->rxd_mode == RXD_MODE_1)
		size = (size * (sizeof(RxD1_t)));
		size = (size * (sizeof(RxD3_t)));

	for (i = 0; i < config->rx_ring_num; i++) {
		mac_control->rings[i].rx_curr_get_info.block_index = 0;
		mac_control->rings[i].rx_curr_get_info.offset = 0;
		mac_control->rings[i].rx_curr_get_info.ring_len =
			config->rx_cfg[i].num_rxd - 1;
		mac_control->rings[i].rx_curr_put_info.block_index = 0;
		mac_control->rings[i].rx_curr_put_info.offset = 0;
		mac_control->rings[i].rx_curr_put_info.ring_len =
			config->rx_cfg[i].num_rxd - 1;
		mac_control->rings[i].nic = nic;
		mac_control->rings[i].ring_no = i;

		blk_cnt = config->rx_cfg[i].num_rxd /
			(rxd_count[nic->rxd_mode] + 1);
		/* Allocating all the Rx blocks */
		for (j = 0; j < blk_cnt; j++) {
			rx_block_info_t *rx_blocks;

			rx_blocks = &mac_control->rings[i].rx_blocks[j];
			size = SIZE_OF_BLOCK;	/* size is always page size */
			tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
			if (tmp_v_addr == NULL) {
				/*
				 * In case of failure, free_shared_mem()
				 * is called, which should free any
				 * memory that was alloced till the
				 */
				rx_blocks->block_virt_addr = tmp_v_addr;
			memset(tmp_v_addr, 0, size);
			rx_blocks->block_virt_addr = tmp_v_addr;
			rx_blocks->block_dma_addr = tmp_p_addr;
			/* Per-RxD lookup table into the block. */
			rx_blocks->rxds = kmalloc(sizeof(rxd_info_t)*
						  rxd_count[nic->rxd_mode],
			for (l=0; l<rxd_count[nic->rxd_mode];l++) {
				rx_blocks->rxds[l].virt_addr =
					rx_blocks->block_virt_addr +
					(rxd_size[nic->rxd_mode] * l);
				rx_blocks->rxds[l].dma_addr =
					rx_blocks->block_dma_addr +
					(rxd_size[nic->rxd_mode] * l);
		/* Interlinking all Rx Blocks */
		for (j = 0; j < blk_cnt; j++) {
			mac_control->rings[i].rx_blocks[j].block_virt_addr;
			mac_control->rings[i].rx_blocks[(j + 1) %
				blk_cnt].block_virt_addr;
			mac_control->rings[i].rx_blocks[j].block_dma_addr;
			mac_control->rings[i].rx_blocks[(j + 1) %
				blk_cnt].block_dma_addr;

			/* Chain each block to the next (circular list). */
			pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
			pre_rxd_blk->reserved_2_pNext_RxD_block =
				(unsigned long) tmp_v_addr_next;
			pre_rxd_blk->pNext_RxD_Blk_physical =
				(u64) tmp_p_addr_next;

	if (nic->rxd_mode >= RXD_MODE_3A) {
		/*
		 * Allocation of Storages for buffer addresses in 2BUFF mode
		 * and the buffers as well.
		 */
		for (i = 0; i < config->rx_ring_num; i++) {
			blk_cnt = config->rx_cfg[i].num_rxd /
				(rxd_count[nic->rxd_mode]+ 1);
			mac_control->rings[i].ba =
				kmalloc((sizeof(buffAdd_t *) * blk_cnt),
			if (!mac_control->rings[i].ba)
			for (j = 0; j < blk_cnt; j++) {
				mac_control->rings[i].ba[j] =
					kmalloc((sizeof(buffAdd_t) *
						(rxd_count[nic->rxd_mode] + 1)),
				if (!mac_control->rings[i].ba[j])
				while (k != rxd_count[nic->rxd_mode]) {
					ba = &mac_control->rings[i].ba[j][k];
					/* Over-allocate then round down so
					 * ba_0/ba_1 are ALIGN_SIZE aligned. */
					ba->ba_0_org = (void *) kmalloc
					    (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
					tmp = (unsigned long)ba->ba_0_org;
					tmp &= ~((unsigned long) ALIGN_SIZE);
					ba->ba_0 = (void *) tmp;
					ba->ba_1_org = (void *) kmalloc
					    (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
					tmp = (unsigned long) ba->ba_1_org;
					tmp &= ~((unsigned long) ALIGN_SIZE);
					ba->ba_1 = (void *) tmp;

	/* Allocation and initialization of Statistics block */
	size = sizeof(StatInfo_t);
	mac_control->stats_mem = pci_alloc_consistent
	    (nic->pdev, size, &mac_control->stats_mem_phy);

	if (!mac_control->stats_mem) {
		/*
		 * In case of failure, free_shared_mem() is called, which
		 * should free any memory that was alloced till the
		 */
	mac_control->stats_mem_sz = size;

	tmp_v_addr = mac_control->stats_mem;
	mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
	memset(tmp_v_addr, 0, size);
	DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
		  (unsigned long long) tmp_p_addr);
/**
 * free_shared_mem - Free the allocated Memory
 * @nic: Device private variable.
 * Description: This function is to free all memory locations allocated by
 * the init_shared_mem() function and return it to the kernel.
 *
 * NOTE(review): parts of this body (braces, several statements, the
 * 'tmp_v_addr' and 'k' declarations) are not visible in this view —
 * restore from the original source before building.
 */
static void free_shared_mem(struct s2io_nic *nic)
	int i, j, blk_cnt, size;
	dma_addr_t tmp_p_addr;
	mac_info_t *mac_control;
	struct config_param *config;
	int lst_size, lst_per_page;
	struct net_device *dev = nic->dev;

	mac_control = &nic->mac_control;
	config = &nic->config;

	lst_size = (sizeof(TxD_t) * config->max_txds);
	lst_per_page = PAGE_SIZE / lst_size;

	/* Release the per-FIFO TxD list pages and bookkeeping arrays,
	 * mirroring the allocation order in init_shared_mem(). */
	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
		for (j = 0; j < page_num; j++) {
			int mem_blks = (j * lst_per_page);
			if (!mac_control->fifos[i].list_info)
			if (!mac_control->fifos[i].list_info[mem_blks].
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    mac_control->fifos[i].
					    mac_control->fifos[i].
		/* If we got a zero DMA address during allocation,
		 * free the page we kept aside for it as well. */
		if (mac_control->zerodma_virt_addr) {
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    mac_control->zerodma_virt_addr,
				"%s: Freeing TxDL with zero DMA addr. ",
			DBG_PRINT(INIT_DBG, "Virtual address %p\n",
				  mac_control->zerodma_virt_addr);
		kfree(mac_control->fifos[i].list_info);

	/* Release every DMA-coherent Rx block and its RxD lookup table. */
	size = SIZE_OF_BLOCK;
	for (i = 0; i < config->rx_ring_num; i++) {
		blk_cnt = mac_control->rings[i].block_count;
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr = mac_control->rings[i].rx_blocks[j].
			tmp_p_addr = mac_control->rings[i].rx_blocks[j].
			if (tmp_v_addr == NULL)
			pci_free_consistent(nic->pdev, size,
					    tmp_v_addr, tmp_p_addr);
			kfree(mac_control->rings[i].rx_blocks[j].rxds);

	if (nic->rxd_mode >= RXD_MODE_3A) {
		/* Freeing buffer storage addresses in 2BUFF mode. */
		for (i = 0; i < config->rx_ring_num; i++) {
			blk_cnt = config->rx_cfg[i].num_rxd /
				(rxd_count[nic->rxd_mode] + 1);
			for (j = 0; j < blk_cnt; j++) {
				if (!mac_control->rings[i].ba[j])
				while (k != rxd_count[nic->rxd_mode]) {
					&mac_control->rings[i].ba[j][k];
				kfree(mac_control->rings[i].ba[j]);
			kfree(mac_control->rings[i].ba);

	/* Statistics block and the UFO scratch area. */
	if (mac_control->stats_mem) {
		pci_free_consistent(nic->pdev,
				    mac_control->stats_mem_sz,
				    mac_control->stats_mem,
				    mac_control->stats_mem_phy);
	/* NOTE(review): the NULL guard is redundant — kfree(NULL) is a no-op. */
	if (nic->ufo_in_band_v)
		kfree(nic->ufo_in_band_v);
/**
 * s2io_verify_pci_mode - read the adapter's pci_mode register and fail
 * (-1) if the adapter reports an unrecognised bus mode.
 * NOTE(review): the declaration of 'mode' and the success return are
 * not visible in this view — the body appears truncated.
 */
static int s2io_verify_pci_mode(nic_t *nic)
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;

	val64 = readq(&bar0->pci_mode);
	mode = (u8)GET_PCI_MODE(val64);

	if ( val64 & PCI_MODE_UNKNOWN_MODE)
		return -1;	/* Unknown PCI mode */
/* PCI IDs of the NEC bridge this adapter may sit behind. */
#define NEC_VENID 0x1033
#define NEC_DEVID 0x0125
/*
 * s2io_on_nec_bridge - scan all PCI devices and report whether a NEC
 * bridge (vendor 0x1033, device 0x0125) is the parent bus of @s2io_pdev.
 * NOTE(review): braces and the return statements are not visible in
 * this view; also, pci_get_device() returns a referenced device —
 * confirm the reference is dropped on the early-exit path.
 */
static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
	struct pci_dev *tdev = NULL;
	while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
		if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
			if (tdev->bus == s2io_pdev->bus->parent)
/* Bus speed in MHz, indexed by the mode value decoded from bar0->pci_mode. */
static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
/**
 * s2io_print_pci_mode - decode bar0->pci_mode, record the bus speed in
 * config->bus_speed and log the bus width and speed; returns -1 for an
 * unknown mode or unsupported speed.
 * NOTE(review): the declaration of 'mode', several braces, the switch
 * statement header and the 'break's of its arms are not visible in this
 * view — the body appears truncated.
 */
static int s2io_print_pci_mode(nic_t *nic)
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	struct config_param *config = &nic->config;

	val64 = readq(&bar0->pci_mode);
	mode = (u8)GET_PCI_MODE(val64);

	if ( val64 & PCI_MODE_UNKNOWN_MODE)
		return -1;	/* Unknown PCI mode */

	config->bus_speed = bus_speed[mode];

	if (s2io_on_nec_bridge(nic->pdev)) {
		DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",

	if (val64 & PCI_MODE_32_BITS) {
		DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
		DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);

	case PCI_MODE_PCI_33:
		DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
	case PCI_MODE_PCI_66:
		DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
	case PCI_MODE_PCIX_M1_66:
		DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
	case PCI_MODE_PCIX_M1_100:
		DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
	case PCI_MODE_PCIX_M1_133:
		DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
	case PCI_MODE_PCIX_M2_66:
		DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
	case PCI_MODE_PCIX_M2_100:
		DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
	case PCI_MODE_PCIX_M2_133:
		DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
		return -1;	/* Unsupported bus speed */
932 * init_nic - Initialization of hardware
* @nic: device private variable
934 * Description: The function sequentially configures every block
935 * of the H/W from their reset values.
936 * Return Value: SUCCESS on success and
937 * '-1' on failure (endian settings incorrect).
940 static int init_nic(struct s2io_nic *nic)
942 XENA_dev_config_t __iomem *bar0 = nic->bar0;
943 struct net_device *dev = nic->dev;
944 register u64 val64 = 0;
948 mac_info_t *mac_control;
949 struct config_param *config;
951 unsigned long long mem_share;
954 mac_control = &nic->mac_control;
955 config = &nic->config;
957 /* to set the swapper controle on the card */
958 if(s2io_set_swapper(nic)) {
959 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
964 * Herc requires EOI to be removed from reset before XGXS, so..
966 if (nic->device_type & XFRAME_II_DEVICE) {
967 val64 = 0xA500000000ULL;
968 writeq(val64, &bar0->sw_reset);
970 val64 = readq(&bar0->sw_reset);
973 /* Remove XGXS from reset state */
975 writeq(val64, &bar0->sw_reset);
977 val64 = readq(&bar0->sw_reset);
979 /* Enable Receiving broadcasts */
980 add = &bar0->mac_cfg;
981 val64 = readq(&bar0->mac_cfg);
982 val64 |= MAC_RMAC_BCAST_ENABLE;
983 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
984 writel((u32) val64, add);
985 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
986 writel((u32) (val64 >> 32), (add + 4));
988 /* Read registers in all blocks */
989 val64 = readq(&bar0->mac_int_mask);
990 val64 = readq(&bar0->mc_int_mask);
991 val64 = readq(&bar0->xgxs_int_mask);
995 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
997 if (nic->device_type & XFRAME_II_DEVICE) {
998 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
999 SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1000 &bar0->dtx_control, UF);
1002 msleep(1); /* Necessary!! */
1006 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1007 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1008 &bar0->dtx_control, UF);
1009 val64 = readq(&bar0->dtx_control);
1014 /* Tx DMA Initialization */
1016 writeq(val64, &bar0->tx_fifo_partition_0);
1017 writeq(val64, &bar0->tx_fifo_partition_1);
1018 writeq(val64, &bar0->tx_fifo_partition_2);
1019 writeq(val64, &bar0->tx_fifo_partition_3);
1022 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1024 vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
1025 13) | vBIT(config->tx_cfg[i].fifo_priority,
1028 if (i == (config->tx_fifo_num - 1)) {
1035 writeq(val64, &bar0->tx_fifo_partition_0);
1039 writeq(val64, &bar0->tx_fifo_partition_1);
1043 writeq(val64, &bar0->tx_fifo_partition_2);
1047 writeq(val64, &bar0->tx_fifo_partition_3);
1053 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1054 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1056 if ((nic->device_type == XFRAME_I_DEVICE) &&
1057 (get_xena_rev_id(nic->pdev) < 4))
1058 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1060 val64 = readq(&bar0->tx_fifo_partition_0);
1061 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1062 &bar0->tx_fifo_partition_0, (unsigned long long) val64);
1065 * Initialization of Tx_PA_CONFIG register to ignore packet
1066 * integrity checking.
1068 val64 = readq(&bar0->tx_pa_cfg);
1069 val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
1070 TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
1071 writeq(val64, &bar0->tx_pa_cfg);
1073 /* Rx DMA intialization. */
1075 for (i = 0; i < config->rx_ring_num; i++) {
1077 vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
1080 writeq(val64, &bar0->rx_queue_priority);
1083 * Allocating equal share of memory to all the
1087 if (nic->device_type & XFRAME_II_DEVICE)
1092 for (i = 0; i < config->rx_ring_num; i++) {
1095 mem_share = (mem_size / config->rx_ring_num +
1096 mem_size % config->rx_ring_num);
1097 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1100 mem_share = (mem_size / config->rx_ring_num);
1101 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1104 mem_share = (mem_size / config->rx_ring_num);
1105 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1108 mem_share = (mem_size / config->rx_ring_num);
1109 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1112 mem_share = (mem_size / config->rx_ring_num);
1113 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1116 mem_share = (mem_size / config->rx_ring_num);
1117 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1120 mem_share = (mem_size / config->rx_ring_num);
1121 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1124 mem_share = (mem_size / config->rx_ring_num);
1125 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1129 writeq(val64, &bar0->rx_queue_cfg);
1132 * Filling Tx round robin registers
1133 * as per the number of FIFOs
1135 switch (config->tx_fifo_num) {
1137 val64 = 0x0000000000000000ULL;
1138 writeq(val64, &bar0->tx_w_round_robin_0);
1139 writeq(val64, &bar0->tx_w_round_robin_1);
1140 writeq(val64, &bar0->tx_w_round_robin_2);
1141 writeq(val64, &bar0->tx_w_round_robin_3);
1142 writeq(val64, &bar0->tx_w_round_robin_4);
1145 val64 = 0x0000010000010000ULL;
1146 writeq(val64, &bar0->tx_w_round_robin_0);
1147 val64 = 0x0100000100000100ULL;
1148 writeq(val64, &bar0->tx_w_round_robin_1);
1149 val64 = 0x0001000001000001ULL;
1150 writeq(val64, &bar0->tx_w_round_robin_2);
1151 val64 = 0x0000010000010000ULL;
1152 writeq(val64, &bar0->tx_w_round_robin_3);
1153 val64 = 0x0100000000000000ULL;
1154 writeq(val64, &bar0->tx_w_round_robin_4);
1157 val64 = 0x0001000102000001ULL;
1158 writeq(val64, &bar0->tx_w_round_robin_0);
1159 val64 = 0x0001020000010001ULL;
1160 writeq(val64, &bar0->tx_w_round_robin_1);
1161 val64 = 0x0200000100010200ULL;
1162 writeq(val64, &bar0->tx_w_round_robin_2);
1163 val64 = 0x0001000102000001ULL;
1164 writeq(val64, &bar0->tx_w_round_robin_3);
1165 val64 = 0x0001020000000000ULL;
1166 writeq(val64, &bar0->tx_w_round_robin_4);
1169 val64 = 0x0001020300010200ULL;
1170 writeq(val64, &bar0->tx_w_round_robin_0);
1171 val64 = 0x0100000102030001ULL;
1172 writeq(val64, &bar0->tx_w_round_robin_1);
1173 val64 = 0x0200010000010203ULL;
1174 writeq(val64, &bar0->tx_w_round_robin_2);
1175 val64 = 0x0001020001000001ULL;
1176 writeq(val64, &bar0->tx_w_round_robin_3);
1177 val64 = 0x0203000100000000ULL;
1178 writeq(val64, &bar0->tx_w_round_robin_4);
1181 val64 = 0x0001000203000102ULL;
1182 writeq(val64, &bar0->tx_w_round_robin_0);
1183 val64 = 0x0001020001030004ULL;
1184 writeq(val64, &bar0->tx_w_round_robin_1);
1185 val64 = 0x0001000203000102ULL;
1186 writeq(val64, &bar0->tx_w_round_robin_2);
1187 val64 = 0x0001020001030004ULL;
1188 writeq(val64, &bar0->tx_w_round_robin_3);
1189 val64 = 0x0001000000000000ULL;
1190 writeq(val64, &bar0->tx_w_round_robin_4);
1193 val64 = 0x0001020304000102ULL;
1194 writeq(val64, &bar0->tx_w_round_robin_0);
1195 val64 = 0x0304050001020001ULL;
1196 writeq(val64, &bar0->tx_w_round_robin_1);
1197 val64 = 0x0203000100000102ULL;
1198 writeq(val64, &bar0->tx_w_round_robin_2);
1199 val64 = 0x0304000102030405ULL;
1200 writeq(val64, &bar0->tx_w_round_robin_3);
1201 val64 = 0x0001000200000000ULL;
1202 writeq(val64, &bar0->tx_w_round_robin_4);
1205 val64 = 0x0001020001020300ULL;
1206 writeq(val64, &bar0->tx_w_round_robin_0);
1207 val64 = 0x0102030400010203ULL;
1208 writeq(val64, &bar0->tx_w_round_robin_1);
1209 val64 = 0x0405060001020001ULL;
1210 writeq(val64, &bar0->tx_w_round_robin_2);
1211 val64 = 0x0304050000010200ULL;
1212 writeq(val64, &bar0->tx_w_round_robin_3);
1213 val64 = 0x0102030000000000ULL;
1214 writeq(val64, &bar0->tx_w_round_robin_4);
1217 val64 = 0x0001020300040105ULL;
1218 writeq(val64, &bar0->tx_w_round_robin_0);
1219 val64 = 0x0200030106000204ULL;
1220 writeq(val64, &bar0->tx_w_round_robin_1);
1221 val64 = 0x0103000502010007ULL;
1222 writeq(val64, &bar0->tx_w_round_robin_2);
1223 val64 = 0x0304010002060500ULL;
1224 writeq(val64, &bar0->tx_w_round_robin_3);
1225 val64 = 0x0103020400000000ULL;
1226 writeq(val64, &bar0->tx_w_round_robin_4);
1230 /* Enable all configured Tx FIFO partitions */
1231 val64 = readq(&bar0->tx_fifo_partition_0);
1232 val64 |= (TX_FIFO_PARTITION_EN);
1233 writeq(val64, &bar0->tx_fifo_partition_0);
1235 /* Filling the Rx round robin registers as per the
1236 * number of Rings and steering based on QoS.
1238 switch (config->rx_ring_num) {
1240 val64 = 0x8080808080808080ULL;
1241 writeq(val64, &bar0->rts_qos_steering);
1244 val64 = 0x0000010000010000ULL;
1245 writeq(val64, &bar0->rx_w_round_robin_0);
1246 val64 = 0x0100000100000100ULL;
1247 writeq(val64, &bar0->rx_w_round_robin_1);
1248 val64 = 0x0001000001000001ULL;
1249 writeq(val64, &bar0->rx_w_round_robin_2);
1250 val64 = 0x0000010000010000ULL;
1251 writeq(val64, &bar0->rx_w_round_robin_3);
1252 val64 = 0x0100000000000000ULL;
1253 writeq(val64, &bar0->rx_w_round_robin_4);
1255 val64 = 0x8080808040404040ULL;
1256 writeq(val64, &bar0->rts_qos_steering);
1259 val64 = 0x0001000102000001ULL;
1260 writeq(val64, &bar0->rx_w_round_robin_0);
1261 val64 = 0x0001020000010001ULL;
1262 writeq(val64, &bar0->rx_w_round_robin_1);
1263 val64 = 0x0200000100010200ULL;
1264 writeq(val64, &bar0->rx_w_round_robin_2);
1265 val64 = 0x0001000102000001ULL;
1266 writeq(val64, &bar0->rx_w_round_robin_3);
1267 val64 = 0x0001020000000000ULL;
1268 writeq(val64, &bar0->rx_w_round_robin_4);
1270 val64 = 0x8080804040402020ULL;
1271 writeq(val64, &bar0->rts_qos_steering);
1274 val64 = 0x0001020300010200ULL;
1275 writeq(val64, &bar0->rx_w_round_robin_0);
1276 val64 = 0x0100000102030001ULL;
1277 writeq(val64, &bar0->rx_w_round_robin_1);
1278 val64 = 0x0200010000010203ULL;
1279 writeq(val64, &bar0->rx_w_round_robin_2);
1280 val64 = 0x0001020001000001ULL;
1281 writeq(val64, &bar0->rx_w_round_robin_3);
1282 val64 = 0x0203000100000000ULL;
1283 writeq(val64, &bar0->rx_w_round_robin_4);
1285 val64 = 0x8080404020201010ULL;
1286 writeq(val64, &bar0->rts_qos_steering);
1289 val64 = 0x0001000203000102ULL;
1290 writeq(val64, &bar0->rx_w_round_robin_0);
1291 val64 = 0x0001020001030004ULL;
1292 writeq(val64, &bar0->rx_w_round_robin_1);
1293 val64 = 0x0001000203000102ULL;
1294 writeq(val64, &bar0->rx_w_round_robin_2);
1295 val64 = 0x0001020001030004ULL;
1296 writeq(val64, &bar0->rx_w_round_robin_3);
1297 val64 = 0x0001000000000000ULL;
1298 writeq(val64, &bar0->rx_w_round_robin_4);
1300 val64 = 0x8080404020201008ULL;
1301 writeq(val64, &bar0->rts_qos_steering);
1304 val64 = 0x0001020304000102ULL;
1305 writeq(val64, &bar0->rx_w_round_robin_0);
1306 val64 = 0x0304050001020001ULL;
1307 writeq(val64, &bar0->rx_w_round_robin_1);
1308 val64 = 0x0203000100000102ULL;
1309 writeq(val64, &bar0->rx_w_round_robin_2);
1310 val64 = 0x0304000102030405ULL;
1311 writeq(val64, &bar0->rx_w_round_robin_3);
1312 val64 = 0x0001000200000000ULL;
1313 writeq(val64, &bar0->rx_w_round_robin_4);
1315 val64 = 0x8080404020100804ULL;
1316 writeq(val64, &bar0->rts_qos_steering);
1319 val64 = 0x0001020001020300ULL;
1320 writeq(val64, &bar0->rx_w_round_robin_0);
1321 val64 = 0x0102030400010203ULL;
1322 writeq(val64, &bar0->rx_w_round_robin_1);
1323 val64 = 0x0405060001020001ULL;
1324 writeq(val64, &bar0->rx_w_round_robin_2);
1325 val64 = 0x0304050000010200ULL;
1326 writeq(val64, &bar0->rx_w_round_robin_3);
1327 val64 = 0x0102030000000000ULL;
1328 writeq(val64, &bar0->rx_w_round_robin_4);
1330 val64 = 0x8080402010080402ULL;
1331 writeq(val64, &bar0->rts_qos_steering);
1334 val64 = 0x0001020300040105ULL;
1335 writeq(val64, &bar0->rx_w_round_robin_0);
1336 val64 = 0x0200030106000204ULL;
1337 writeq(val64, &bar0->rx_w_round_robin_1);
1338 val64 = 0x0103000502010007ULL;
1339 writeq(val64, &bar0->rx_w_round_robin_2);
1340 val64 = 0x0304010002060500ULL;
1341 writeq(val64, &bar0->rx_w_round_robin_3);
1342 val64 = 0x0103020400000000ULL;
1343 writeq(val64, &bar0->rx_w_round_robin_4);
1345 val64 = 0x8040201008040201ULL;
1346 writeq(val64, &bar0->rts_qos_steering);
1352 for (i = 0; i < 8; i++)
1353 writeq(val64, &bar0->rts_frm_len_n[i]);
1355 /* Set the default rts frame length for the rings configured */
1356 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1357 for (i = 0 ; i < config->rx_ring_num ; i++)
1358 writeq(val64, &bar0->rts_frm_len_n[i]);
1360 /* Set the frame length for the configured rings
1361 * desired by the user
1363 for (i = 0; i < config->rx_ring_num; i++) {
1364 /* If rts_frm_len[i] == 0 then it is assumed that user not
1365 * specified frame length steering.
1366 * If the user provides the frame length then program
1367 * the rts_frm_len register for those values or else
1368 * leave it as it is.
1370 if (rts_frm_len[i] != 0) {
1371 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1372 &bar0->rts_frm_len_n[i]);
1376 /* Program statistics memory */
1377 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1379 if (nic->device_type == XFRAME_II_DEVICE) {
1380 val64 = STAT_BC(0x320);
1381 writeq(val64, &bar0->stat_byte_cnt);
1385 * Initializing the sampling rate for the device to calculate the
1386 * bandwidth utilization.
1388 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1389 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1390 writeq(val64, &bar0->mac_link_util);
1394 * Initializing the Transmit and Receive Traffic Interrupt
1398 * TTI Initialization. Default Tx timer gets us about
1399 * 250 interrupts per sec. Continuous interrupts are enabled
1402 if (nic->device_type == XFRAME_II_DEVICE) {
1403 int count = (nic->config.bus_speed * 125)/2;
1404 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1407 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1409 val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1410 TTI_DATA1_MEM_TX_URNG_B(0x10) |
1411 TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1412 if (use_continuous_tx_intrs)
1413 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1414 writeq(val64, &bar0->tti_data1_mem);
1416 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1417 TTI_DATA2_MEM_TX_UFC_B(0x20) |
1418 TTI_DATA2_MEM_TX_UFC_C(0x40) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1419 writeq(val64, &bar0->tti_data2_mem);
1421 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1422 writeq(val64, &bar0->tti_command_mem);
1425 * Once the operation completes, the Strobe bit of the command
1426 * register will be reset. We poll for this particular condition
1427 * We wait for a maximum of 500ms for the operation to complete,
1428 * if it's not complete by then we return error.
1432 val64 = readq(&bar0->tti_command_mem);
1433 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1437 DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1445 if (nic->config.bimodal) {
1447 for (k = 0; k < config->rx_ring_num; k++) {
1448 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1449 val64 |= TTI_CMD_MEM_OFFSET(0x38+k);
1450 writeq(val64, &bar0->tti_command_mem);
1453 * Once the operation completes, the Strobe bit of the command
1454 * register will be reset. We poll for this particular condition
1455 * We wait for a maximum of 500ms for the operation to complete,
1456 * if it's not complete by then we return error.
1460 val64 = readq(&bar0->tti_command_mem);
1461 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1466 "%s: TTI init Failed\n",
1476 /* RTI Initialization */
1477 if (nic->device_type == XFRAME_II_DEVICE) {
1479 * Programmed to generate Apprx 500 Intrs per
1482 int count = (nic->config.bus_speed * 125)/4;
1483 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1485 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1487 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1488 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1489 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1491 writeq(val64, &bar0->rti_data1_mem);
1493 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1494 RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1495 if (nic->intr_type == MSI_X)
1496 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1497 RTI_DATA2_MEM_RX_UFC_D(0x40));
1499 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1500 RTI_DATA2_MEM_RX_UFC_D(0x80));
1501 writeq(val64, &bar0->rti_data2_mem);
1503 for (i = 0; i < config->rx_ring_num; i++) {
1504 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1505 | RTI_CMD_MEM_OFFSET(i);
1506 writeq(val64, &bar0->rti_command_mem);
1509 * Once the operation completes, the Strobe bit of the
1510 * command register will be reset. We poll for this
1511 * particular condition. We wait for a maximum of 500ms
1512 * for the operation to complete, if it's not complete
1513 * by then we return error.
1517 val64 = readq(&bar0->rti_command_mem);
1518 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1522 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1533 * Initializing proper values as Pause threshold into all
1534 * the 8 Queues on Rx side.
1536 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1537 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1539 /* Disable RMAC PAD STRIPPING */
1540 add = &bar0->mac_cfg;
1541 val64 = readq(&bar0->mac_cfg);
1542 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1543 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1544 writel((u32) (val64), add);
1545 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1546 writel((u32) (val64 >> 32), (add + 4));
1547 val64 = readq(&bar0->mac_cfg);
1549 /* Enable FCS stripping by adapter */
1550 add = &bar0->mac_cfg;
1551 val64 = readq(&bar0->mac_cfg);
1552 val64 |= MAC_CFG_RMAC_STRIP_FCS;
1553 if (nic->device_type == XFRAME_II_DEVICE)
1554 writeq(val64, &bar0->mac_cfg);
1556 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1557 writel((u32) (val64), add);
1558 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1559 writel((u32) (val64 >> 32), (add + 4));
1563 * Set the time value to be inserted in the pause frame
1564 * generated by xena.
1566 val64 = readq(&bar0->rmac_pause_cfg);
1567 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1568 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1569 writeq(val64, &bar0->rmac_pause_cfg);
1572 * Set the Threshold Limit for Generating the pause frame
1573 * If the amount of data in any Queue exceeds ratio of
1574 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1575 * pause frame is generated
1578 for (i = 0; i < 4; i++) {
1580 (((u64) 0xFF00 | nic->mac_control.
1581 mc_pause_threshold_q0q3)
1584 writeq(val64, &bar0->mc_pause_thresh_q0q3);
1587 for (i = 0; i < 4; i++) {
1589 (((u64) 0xFF00 | nic->mac_control.
1590 mc_pause_threshold_q4q7)
1593 writeq(val64, &bar0->mc_pause_thresh_q4q7);
1596 * TxDMA will stop Read request if the number of read split has
1597 * exceeded the limit pointed by shared_splits
1599 val64 = readq(&bar0->pic_control);
1600 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1601 writeq(val64, &bar0->pic_control);
1603 if (nic->config.bus_speed == 266) {
1604 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1605 writeq(0x0, &bar0->read_retry_delay);
1606 writeq(0x0, &bar0->write_retry_delay);
1610 * Programming the Herc to split every write transaction
1611 * that does not start on an ADB to reduce disconnects.
1613 if (nic->device_type == XFRAME_II_DEVICE) {
1614 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1615 MISC_LINK_STABILITY_PRD(3);
1616 writeq(val64, &bar0->misc_control);
1617 val64 = readq(&bar0->pic_control2);
1618 val64 &= ~(BIT(13)|BIT(14)|BIT(15));
1619 writeq(val64, &bar0->pic_control2);
1621 if (strstr(nic->product_name, "CX4")) {
1622 val64 = TMAC_AVG_IPG(0x17);
1623 writeq(val64, &bar0->tmac_avg_ipg);
1628 #define LINK_UP_DOWN_INTERRUPT 1
1629 #define MAC_RMAC_ERR_TIMER 2
1631 static int s2io_link_fault_indication(nic_t *nic)
1633 if (nic->intr_type != INTA)
1634 return MAC_RMAC_ERR_TIMER;
1635 if (nic->device_type == XFRAME_II_DEVICE)
1636 return LINK_UP_DOWN_INTERRUPT;
1638 return MAC_RMAC_ERR_TIMER;
1642 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1643 * @nic: device private variable,
1644 * @mask: A mask indicating which Intr block must be modified and,
1645 * @flag: A flag indicating whether to enable or disable the Intrs.
1646 * Description: This function will either disable or enable the interrupts
1647 * depending on the flag argument. The mask argument can be used to
1648 * enable/disable any Intr block.
1649 * Return Value: NONE.
/*
 * en_dis_able_nic_intrs - Enable or disable selected interrupt blocks.
 * @nic: device private structure.
 * @mask: bitmask of interrupt blocks to act on (PIC, Tx/Rx DMA, MAC,
 *        XGXS, MC, Tx/Rx traffic).
 * @flag: ENABLE_INTRS or DISABLE_INTRS.
 *
 * For each selected block this clears (enable) or sets (disable) the
 * block's bits in general_int_mask, then programs the block-local mask
 * registers; writing 0 to a mask register enables every interrupt of
 * that block, while DISABLE_ALL_INTRS masks them all.
 *
 * NOTE(review): this listing is elided -- the "val64 |= temp64" merges
 * expected before several general_int_mask write-backs and some braces
 * are not shown; confirm the exact statement sequence against the
 * complete source before relying on it.
 */
1652 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1654 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1655 register u64 val64 = 0, temp64 = 0;
1657 /* Top level interrupt classification */
1658 /* PIC Interrupts */
1659 if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
1660 /* Enable PIC Intrs in the general intr mask register */
1661 val64 = TXPIC_INT_M | PIC_RX_INT_M;
1662 if (flag == ENABLE_INTRS) {
1663 temp64 = readq(&bar0->general_int_mask);
1664 temp64 &= ~((u64) val64);
1665 writeq(temp64, &bar0->general_int_mask);
1667 * If Hercules adapter enable GPIO otherwise
1668 * disable all PCIX, Flash, MDIO, IIC and GPIO
1669 * interrupts for now.
/* GPIO link up/down interrupts are unmasked only when
 * s2io_link_fault_indication() selects them (Hercules + INTA). */
1672 if (s2io_link_fault_indication(nic) ==
1673 LINK_UP_DOWN_INTERRUPT ) {
1674 temp64 = readq(&bar0->pic_int_mask);
1675 temp64 &= ~((u64) PIC_INT_GPIO);
1676 writeq(temp64, &bar0->pic_int_mask);
1677 temp64 = readq(&bar0->gpio_int_mask);
1678 temp64 &= ~((u64) GPIO_INT_MASK_LINK_UP);
1679 writeq(temp64, &bar0->gpio_int_mask);
/* Otherwise: mask every PIC-level source. */
1681 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1684 * No MSI Support is available presently, so TTI and
1685 * RTI interrupts are also disabled.
1687 } else if (flag == DISABLE_INTRS) {
1689 * Disable PIC Intrs in the general
1690 * intr mask register
1692 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1693 temp64 = readq(&bar0->general_int_mask);
1695 writeq(val64, &bar0->general_int_mask);
1699 /* DMA Interrupts */
1700 /* Enabling/Disabling Tx DMA interrupts */
1701 if (mask & TX_DMA_INTR) {
1702 /* Enable TxDMA Intrs in the general intr mask register */
1703 val64 = TXDMA_INT_M;
1704 if (flag == ENABLE_INTRS) {
1705 temp64 = readq(&bar0->general_int_mask);
1706 temp64 &= ~((u64) val64);
1707 writeq(temp64, &bar0->general_int_mask);
1709 * Keep all interrupts other than PFC interrupt
1710 * and PCC interrupt disabled in DMA level.
1712 val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
1714 writeq(val64, &bar0->txdma_int_mask);
1716 * Enable only the MISC error 1 interrupt in PFC block
1718 val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
1719 writeq(val64, &bar0->pfc_err_mask);
1721 * Enable only the FB_ECC error interrupt in PCC block
1723 val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
1724 writeq(val64, &bar0->pcc_err_mask);
1725 } else if (flag == DISABLE_INTRS) {
1727 * Disable TxDMA Intrs in the general intr mask
1730 writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
1731 writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
1732 temp64 = readq(&bar0->general_int_mask);
1734 writeq(val64, &bar0->general_int_mask);
1738 /* Enabling/Disabling Rx DMA interrupts */
1739 if (mask & RX_DMA_INTR) {
1740 /* Enable RxDMA Intrs in the general intr mask register */
1741 val64 = RXDMA_INT_M;
1742 if (flag == ENABLE_INTRS) {
1743 temp64 = readq(&bar0->general_int_mask);
1744 temp64 &= ~((u64) val64);
1745 writeq(temp64, &bar0->general_int_mask);
1747 * All RxDMA block interrupts are disabled for now
1750 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1751 } else if (flag == DISABLE_INTRS) {
1753 * Disable RxDMA Intrs in the general intr mask
1756 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1757 temp64 = readq(&bar0->general_int_mask);
1759 writeq(val64, &bar0->general_int_mask);
1763 /* MAC Interrupts */
1764 /* Enabling/Disabling MAC interrupts */
1765 if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
1766 val64 = TXMAC_INT_M | RXMAC_INT_M;
1767 if (flag == ENABLE_INTRS) {
1768 temp64 = readq(&bar0->general_int_mask);
1769 temp64 &= ~((u64) val64);
1770 writeq(temp64, &bar0->general_int_mask);
1772 * All MAC block error interrupts are disabled for now
1775 } else if (flag == DISABLE_INTRS) {
1777 * Disable MAC Intrs in the general intr mask register
1779 writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
1780 writeq(DISABLE_ALL_INTRS,
1781 &bar0->mac_rmac_err_mask);
1783 temp64 = readq(&bar0->general_int_mask);
1785 writeq(val64, &bar0->general_int_mask);
1789 /* XGXS Interrupts */
1790 if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
1791 val64 = TXXGXS_INT_M | RXXGXS_INT_M;
1792 if (flag == ENABLE_INTRS) {
1793 temp64 = readq(&bar0->general_int_mask);
1794 temp64 &= ~((u64) val64);
1795 writeq(temp64, &bar0->general_int_mask);
1797 * All XGXS block error interrupts are disabled for now
1800 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1801 } else if (flag == DISABLE_INTRS) {
1803 * Disable XGXS Intrs in the general intr mask register
1805 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1806 temp64 = readq(&bar0->general_int_mask);
1808 writeq(val64, &bar0->general_int_mask);
1812 /* Memory Controller(MC) interrupts */
1813 if (mask & MC_INTR) {
/* NOTE(review): the "val64 = MC_INT_M" assignment appears elided
 * from this listing just above -- confirm against full source. */
1815 if (flag == ENABLE_INTRS) {
1816 temp64 = readq(&bar0->general_int_mask);
1817 temp64 &= ~((u64) val64);
1818 writeq(temp64, &bar0->general_int_mask);
1820 * Enable all MC Intrs.
1822 writeq(0x0, &bar0->mc_int_mask);
1823 writeq(0x0, &bar0->mc_err_mask);
1824 } else if (flag == DISABLE_INTRS) {
1826 * Disable MC Intrs in the general intr mask register
1828 writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1829 temp64 = readq(&bar0->general_int_mask);
1831 writeq(val64, &bar0->general_int_mask);
1836 /* Tx traffic interrupts */
1837 if (mask & TX_TRAFFIC_INTR) {
1838 val64 = TXTRAFFIC_INT_M;
1839 if (flag == ENABLE_INTRS) {
1840 temp64 = readq(&bar0->general_int_mask);
1841 temp64 &= ~((u64) val64);
1842 writeq(temp64, &bar0->general_int_mask);
1844 * Enable all the Tx side interrupts
1845 * writing 0 Enables all 64 TX interrupt levels
1847 writeq(0x0, &bar0->tx_traffic_mask);
1848 } else if (flag == DISABLE_INTRS) {
1850 * Disable Tx Traffic Intrs in the general intr mask
1853 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1854 temp64 = readq(&bar0->general_int_mask);
1856 writeq(val64, &bar0->general_int_mask);
1860 /* Rx traffic interrupts */
1861 if (mask & RX_TRAFFIC_INTR) {
1862 val64 = RXTRAFFIC_INT_M;
1863 if (flag == ENABLE_INTRS) {
1864 temp64 = readq(&bar0->general_int_mask);
1865 temp64 &= ~((u64) val64);
1866 writeq(temp64, &bar0->general_int_mask);
1867 /* writing 0 Enables all 8 RX interrupt levels */
1868 writeq(0x0, &bar0->rx_traffic_mask);
1869 } else if (flag == DISABLE_INTRS) {
1871 * Disable Rx Traffic Intrs in the general intr mask
1874 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1875 temp64 = readq(&bar0->general_int_mask);
1877 writeq(val64, &bar0->general_int_mask);
1883 * verify_pcc_quiescent- Checks for PCC quiescent state
1884 * Return: 1 If PCC is quiescence
1885 * 0 If PCC is not quiescence
/*
 * verify_pcc_quiescent - check the RMAC PCC quiescent state.
 * @sp: device private structure.
 * @flag: FALSE when the adapter-enable bit has not yet been written
 *        (PCC idle bits expected clear), otherwise they are expected set.
 *
 * Xena revisions >= 4 and all Hercules (XFrame-II) devices report
 * idleness through ADAPTER_STATUS_RMAC_PCC_IDLE; older Xena silicon
 * uses the PCC_FOUR_IDLE bits instead.
 * Return: 1 if the PCC is quiescent, 0 if not.
 *
 * NOTE(review): the 'herc'/'ret' declarations, return statements and
 * closing braces are elided from this listing -- confirm against the
 * complete source.
 */
1887 static int verify_pcc_quiescent(nic_t *sp, int flag)
1890 XENA_dev_config_t __iomem *bar0 = sp->bar0;
1891 u64 val64 = readq(&bar0->adapter_status);
/* "herc" == XFrame-II (Hercules) silicon. */
1893 herc = (sp->device_type == XFRAME_II_DEVICE);
1895 if (flag == FALSE) {
1896 if ((!herc && (get_xena_rev_id(sp->pdev) >= 4)) || herc) {
1897 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
1900 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
1904 if ((!herc && (get_xena_rev_id(sp->pdev) >= 4)) || herc) {
1905 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1906 ADAPTER_STATUS_RMAC_PCC_IDLE))
1909 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1910 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
1918 * verify_xena_quiescence - Checks whether the H/W is ready
1919 * Description: Returns whether the H/W is ready to go or not. Depending
1920 * on whether adapter enable bit was written or not the comparison
1921 * differs and the calling function passes the input argument flag to
1923 * Return: 1 If xena is quiescence
1924 * 0 If Xena is not quiescence
/*
 * verify_xena_quiescence - check whether the hardware is ready to go.
 * @sp: device private structure.
 *
 * Reads adapter_status once and tests each readiness/quiescence bit in
 * turn (TDMA, RDMA, PFC, TMAC buffer, PIC, MC DRAM, MC queues, PLL
 * locks, RC_PRC), logging which subsystem is not ready.
 * Return: 1 if Xena is quiescent, 0 otherwise.
 *
 * NOTE(review): the 'mode' declaration, the return statements and
 * closing braces are elided from this listing.
 */
1927 static int verify_xena_quiescence(nic_t *sp)
1930 XENA_dev_config_t __iomem *bar0 = sp->bar0;
1931 u64 val64 = readq(&bar0->adapter_status);
/* Current PCI/PCI-X bus mode; used for the P_PLL special case below. */
1932 mode = s2io_verify_pci_mode(sp);
1934 if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
1935 DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
1938 if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
1939 DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
1942 if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
1943 DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
1946 if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
1947 DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
1950 if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
1951 DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
1954 if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
1955 DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
1958 if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
1959 DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
1962 if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
1963 DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
1968 * In PCI 33 mode, the P_PLL is not used, and therefore,
1969 * the P_PLL_LOCK bit in the adapter_status register will
1972 if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
1973 sp->device_type == XFRAME_II_DEVICE && mode !=
1975 DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
1978 if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1979 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1980 DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
1987 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
1988 * @sp: Pointer to device specific structure
1990 * New procedure to clear mac address reading problems on Alpha platforms
/*
 * fix_mac_address - work around MAC address read problems on Alpha.
 * @sp: device private structure.
 *
 * Replays the fix_mac[] write sequence (terminated by END_SIGN) into
 * gpio_control, reading the register back after each write.
 *
 * NOTE(review): the 'i'/'val64' declarations and the loop's closing
 * brace are elided from this listing.
 */
1994 static void fix_mac_address(nic_t * sp)
1996 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2000 while (fix_mac[i] != END_SIGN) {
2001 writeq(fix_mac[i++], &bar0->gpio_control);
/* Read back to flush the posted MMIO write before the next entry. */
2003 val64 = readq(&bar0->gpio_control);
2008 * start_nic - Turns the device on
2009 * @nic : device private variable.
2011 * This function actually turns the device on. Before this function is
2012 * called,all Registers are configured from their reset states
2013 * and shared memory is allocated but the NIC is still quiescent. On
2014 * calling this function, the device interrupts are cleared and the NIC is
2015 * literally switched on by writing into the adapter control register.
2017 * SUCCESS on success and -1 on failure.
/*
 * start_nic - turn the device on.
 * @nic: device private structure.
 *
 * Programs the per-ring PRC registers, enables MC-RLDRAM, clears stale
 * link-change interrupts, verifies quiescence and finally switches the
 * laser on via adapter_control.
 * Return: SUCCESS on success, -1 on failure (per the elided returns).
 *
 * NOTE(review): several statements (declarations of 'i'/'subid',
 * else-branches, returns, braces) are elided from this listing.
 */
2020 static int start_nic(struct s2io_nic *nic)
2022 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2023 struct net_device *dev = nic->dev;
2024 register u64 val64 = 0;
2026 mac_info_t *mac_control;
2027 struct config_param *config;
2029 mac_control = &nic->mac_control;
2030 config = &nic->config;
2032 /* PRC Initialization and configuration */
2033 for (i = 0; i < config->rx_ring_num; i++) {
/* Point each PRC at the first Rx block of its ring. */
2034 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
2035 &bar0->prc_rxd0_n[i]);
2037 val64 = readq(&bar0->prc_ctrl_n[i]);
2038 if (nic->config.bimodal)
2039 val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
2040 if (nic->rxd_mode == RXD_MODE_1)
2041 val64 |= PRC_CTRL_RC_ENABLED;
/* NOTE(review): the "else" for the non-RXD_MODE_1 path is elided. */
2043 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
2044 if (nic->device_type == XFRAME_II_DEVICE)
2045 val64 |= PRC_CTRL_GROUP_READS;
2046 val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
2047 val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
2048 writeq(val64, &bar0->prc_ctrl_n[i]);
2051 if (nic->rxd_mode == RXD_MODE_3B) {
2052 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
2053 val64 = readq(&bar0->rx_pa_cfg);
2054 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
2055 writeq(val64, &bar0->rx_pa_cfg);
2059 * Enabling MC-RLDRAM. After enabling the device, we timeout
2060 * for around 100ms, which is approximately the time required
2061 * for the device to be ready for operation.
2063 val64 = readq(&bar0->mc_rldram_mrs);
2064 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
2065 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
2066 val64 = readq(&bar0->mc_rldram_mrs);
2068 msleep(100); /* Delay by around 100 ms. */
2070 /* Enabling ECC Protection. */
/* NOTE(review): clearing ADAPTER_ECC_EN while the comment says
 * "Enabling" suggests the bit is active-low -- confirm in s2io.h. */
2071 val64 = readq(&bar0->adapter_control);
2072 val64 &= ~ADAPTER_ECC_EN;
2073 writeq(val64, &bar0->adapter_control);
2076 * Clearing any possible Link state change interrupts that
2077 * could have popped up just before Enabling the card.
2079 val64 = readq(&bar0->mac_rmac_err_reg);
/* Write-1-to-clear: writing the value back acks the pending bits. */
2081 writeq(val64, &bar0->mac_rmac_err_reg);
2084 * Verify if the device is ready to be enabled, if so enable
2087 val64 = readq(&bar0->adapter_status);
2088 if (!verify_xena_quiescence(nic)) {
2089 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
2090 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
2091 (unsigned long long) val64);
2096 * With some switches, link might be already up at this point.
2097 * Because of this weird behavior, when we enable laser,
2098 * we may not get link. We need to handle this. We cannot
2099 * figure out which switch is misbehaving. So we are forced to
2100 * make a global change.
2103 /* Enabling Laser. */
2104 val64 = readq(&bar0->adapter_control);
2105 val64 |= ADAPTER_EOI_TX_ON;
2106 writeq(val64, &bar0->adapter_control);
2108 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2110 * Don't see link state interrupts initially on some switches,
2111 * so directly scheduling the link state task here.
2113 schedule_work(&nic->set_link_task);
2115 /* SXE-002: Initialize link and activity LED */
2116 subid = nic->pdev->subsystem_device;
2117 if (((subid & 0xFF) >= 0x07) &&
2118 (nic->device_type == XFRAME_I_DEVICE)) {
2119 val64 = readq(&bar0->gpio_control);
2120 val64 |= 0x0000800000000000ULL;
2121 writeq(val64, &bar0->gpio_control);
2122 val64 = 0x0411040400000000ULL;
/* LED register lives at a raw offset not in the register struct. */
2123 writeq(val64, (void __iomem *)bar0 + 0x2700);
2129 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
/*
 * s2io_txdl_getskb - get the skb from a TxD list, unmapping its buffers.
 * @fifo_data: FIFO the descriptor list belongs to.
 * @txdlp: first TxD of the descriptor list.
 * @get_off: descriptor offset (its use is elided from this listing).
 *
 * Unmaps the DMA mapping of every fragment referenced by the TxD list,
 * zeroes the descriptors, and returns the attached skb so the caller
 * can free it.
 *
 * NOTE(review): the 'txds'/'frg_cnt'/'j' declarations, the return
 * statements (including the UFO in-band early path) and several braces
 * are elided -- confirm against the complete source.
 */
2131 static struct sk_buff *s2io_txdl_getskb(fifo_info_t *fifo_data, TxD_t *txdlp, int get_off)
2133 nic_t *nic = fifo_data->nic;
2134 struct sk_buff *skb;
/* UFO in-band descriptor: the first buffer is a driver-owned u64
 * marker, not packet data -- only unmap it. */
2139 if (txds->Host_Control == (u64)(long)nic->ufo_in_band_v) {
2140 pci_unmap_single(nic->pdev, (dma_addr_t)
2141 txds->Buffer_Pointer, sizeof(u64),
2146 skb = (struct sk_buff *) ((unsigned long)
2147 txds->Host_Control);
2149 memset(txdlp, 0, (sizeof(TxD_t) * fifo_data->max_txds));
/* First buffer maps the linear part of the skb. */
2152 pci_unmap_single(nic->pdev, (dma_addr_t)
2153 txds->Buffer_Pointer,
2154 skb->len - skb->data_len,
2156 frg_cnt = skb_shinfo(skb)->nr_frags;
/* Remaining TxDs map the paged fragments, one per TxD. */
2159 for (j = 0; j < frg_cnt; j++, txds++) {
2160 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2161 if (!txds->Buffer_Pointer)
2163 pci_unmap_page(nic->pdev, (dma_addr_t)
2164 txds->Buffer_Pointer,
2165 frag->size, PCI_DMA_TODEVICE);
2168 memset(txdlp,0, (sizeof(TxD_t) * fifo_data->max_txds));
2173 * free_tx_buffers - Free all queued Tx buffers
2174 * @nic : device private variable.
2176 * Free all queued Tx buffers.
2177 * Return Value: void
/*
 * free_tx_buffers - free all queued Tx buffers.
 * @nic: device private structure.
 *
 * Walks every FIFO and every descriptor list in it, retrieving and
 * freeing any skb still attached, then resets the FIFO get/put offsets.
 *
 * NOTE(review): the 'i'/'j'/'txdp'/'cnt' declarations, the skb-free and
 * the DBG_PRINT argument lines are elided from this listing.
 */
2180 static void free_tx_buffers(struct s2io_nic *nic)
2182 struct net_device *dev = nic->dev;
2183 struct sk_buff *skb;
2186 mac_info_t *mac_control;
2187 struct config_param *config;
2190 mac_control = &nic->mac_control;
2191 config = &nic->config;
2193 for (i = 0; i < config->tx_fifo_num; i++) {
2194 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
2195 txdp = (TxD_t *) mac_control->fifos[i].list_info[j].
/* Unmap and detach the skb; the elided lines free it and count it. */
2197 skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2204 "%s:forcibly freeing %d skbs on FIFO%d\n",
2206 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2207 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2212 * stop_nic - To stop the nic
2213 * @nic : device private variable.
2215 * This function does exactly the opposite of what the start_nic()
2216 * function does. This function is called to stop the device.
/*
 * stop_nic - turn the device off.
 * @nic: device private structure.
 *
 * The inverse of start_nic(): masks the traffic, PIC and MAC interrupt
 * blocks, then clears the Adapter_En bit of adapter_control so the NIC
 * stops processing.
 *
 * NOTE(review): the 'interruptible' declaration is elided from this
 * listing.
 */
2221 static void stop_nic(struct s2io_nic *nic)
2223 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2224 register u64 val64 = 0;
2226 mac_info_t *mac_control;
2227 struct config_param *config;
2229 mac_control = &nic->mac_control;
2230 config = &nic->config;
2232 /* Disable all interrupts */
2233 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2234 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
2235 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
2236 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2238 /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2239 val64 = readq(&bar0->adapter_control);
2240 val64 &= ~(ADAPTER_CNTL_EN);
2241 writeq(val64, &bar0->adapter_control);
/*
 * fill_rxd_3buf - populate buffers 1 and 2 of a 3-buffer-mode RxD.
 * @nic: device private structure.
 * @rxdp: receive descriptor to fill (treated as RxD3_t).
 * @skb: skb whose linear area receives the L3/L4 headers.
 *
 * Maps skb->data as Buffer-1 (headers) and a freshly allocated,
 * ALIGN_SIZE-aligned skb attached via frag_list as Buffer-2 (payload),
 * then records both sizes in Control_2.
 * Return: SUCCESS, or -ENOMEM when the payload skb allocation fails
 * (both per the elided return statements).
 *
 * NOTE(review): the 'tmp' declaration, return statements and some
 * braces are elided; also the pci_map_single() results are not checked
 * for mapping errors in the visible code.
 */
2244 static int fill_rxd_3buf(nic_t *nic, RxD_t *rxdp, struct sk_buff *skb)
2246 struct net_device *dev = nic->dev;
2247 struct sk_buff *frag_list;
2250 /* Buffer-1 receives L3/L4 headers */
2251 ((RxD3_t*)rxdp)->Buffer1_ptr = pci_map_single
2252 (nic->pdev, skb->data, l3l4hdr_size + 4,
2253 PCI_DMA_FROMDEVICE);
2255 /* skb_shinfo(skb)->frag_list will have L4 data payload */
2256 skb_shinfo(skb)->frag_list = dev_alloc_skb(dev->mtu + ALIGN_SIZE);
2257 if (skb_shinfo(skb)->frag_list == NULL) {
2258 DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n ", dev->name);
2261 frag_list = skb_shinfo(skb)->frag_list;
2262 frag_list->next = NULL;
/* Align the payload buffer to ALIGN_SIZE+1 as the hardware expects. */
2263 tmp = (void *)ALIGN((long)frag_list->data, ALIGN_SIZE + 1);
2264 frag_list->data = tmp;
2265 frag_list->tail = tmp;
2267 /* Buffer-2 receives L4 data payload */
2268 ((RxD3_t*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev,
2269 frag_list->data, dev->mtu,
2270 PCI_DMA_FROMDEVICE);
2271 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
2272 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);
2278 * fill_rx_buffers - Allocates the Rx side skbs
2279 * @nic: device private variable
2280 * @ring_no: ring number
2282 * The function allocates Rx side skbs and puts the physical
2283 * address of these buffers into the RxD buffer pointers, so that the NIC
2284 * can DMA the received frame into these locations.
2285 * The NIC supports 3 receive modes, viz
2287 * 2. three buffer and
2288 * 3. Five buffer modes.
2289 * Each mode defines how many fragments the received frame will be split
2290 * up into by the NIC. The frame is split into L3 header, L4 Header,
2291 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2292 * is split into 3 fragments. As of now only single buffer mode is
2295 * SUCCESS on success or an appropriate -ve value on failure.
2298 static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2300 struct net_device *dev = nic->dev;
2301 struct sk_buff *skb;
2303 int off, off1, size, block_no, block_no1;
2306 mac_info_t *mac_control;
2307 struct config_param *config;
2310 unsigned long flags;
2311 RxD_t *first_rxdp = NULL;
2313 mac_control = &nic->mac_control;
2314 config = &nic->config;
2315 alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
2316 atomic_read(&nic->rx_bufs_left[ring_no]);
2318 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
2319 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
2320 while (alloc_tab < alloc_cnt) {
2321 block_no = mac_control->rings[ring_no].rx_curr_put_info.
2323 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
2325 rxdp = mac_control->rings[ring_no].
2326 rx_blocks[block_no].rxds[off].virt_addr;
2328 if ((block_no == block_no1) && (off == off1) &&
2329 (rxdp->Host_Control)) {
2330 DBG_PRINT(INTR_DBG, "%s: Get and Put",
2332 DBG_PRINT(INTR_DBG, " info equated\n");
2335 if (off && (off == rxd_count[nic->rxd_mode])) {
2336 mac_control->rings[ring_no].rx_curr_put_info.
2338 if (mac_control->rings[ring_no].rx_curr_put_info.
2339 block_index == mac_control->rings[ring_no].
2341 mac_control->rings[ring_no].rx_curr_put_info.
2343 block_no = mac_control->rings[ring_no].
2344 rx_curr_put_info.block_index;
2345 if (off == rxd_count[nic->rxd_mode])
2347 mac_control->rings[ring_no].rx_curr_put_info.
2349 rxdp = mac_control->rings[ring_no].
2350 rx_blocks[block_no].block_virt_addr;
2351 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2355 spin_lock_irqsave(&nic->put_lock, flags);
2356 mac_control->rings[ring_no].put_pos =
2357 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2358 spin_unlock_irqrestore(&nic->put_lock, flags);
2360 mac_control->rings[ring_no].put_pos =
2361 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2363 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2364 ((nic->rxd_mode >= RXD_MODE_3A) &&
2365 (rxdp->Control_2 & BIT(0)))) {
2366 mac_control->rings[ring_no].rx_curr_put_info.
2370 /* calculate size of skb based on ring mode */
2371 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2372 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2373 if (nic->rxd_mode == RXD_MODE_1)
2374 size += NET_IP_ALIGN;
2375 else if (nic->rxd_mode == RXD_MODE_3B)
2376 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2378 size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;
2381 skb = dev_alloc_skb(size);
2383 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
2384 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
2387 first_rxdp->Control_1 |= RXD_OWN_XENA;
2391 if (nic->rxd_mode == RXD_MODE_1) {
2392 /* 1 buffer mode - normal operation mode */
2393 memset(rxdp, 0, sizeof(RxD1_t));
2394 skb_reserve(skb, NET_IP_ALIGN);
2395 ((RxD1_t*)rxdp)->Buffer0_ptr = pci_map_single
2396 (nic->pdev, skb->data, size - NET_IP_ALIGN,
2397 PCI_DMA_FROMDEVICE);
2398 rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
2400 } else if (nic->rxd_mode >= RXD_MODE_3A) {
2402 * 2 or 3 buffer mode -
2403 * Both 2 buffer mode and 3 buffer mode provides 128
2404 * byte aligned receive buffers.
2406 * 3 buffer mode provides header separation where in
2407 * skb->data will have L3/L4 headers where as
2408 * skb_shinfo(skb)->frag_list will have the L4 data
2412 memset(rxdp, 0, sizeof(RxD3_t));
2413 ba = &mac_control->rings[ring_no].ba[block_no][off];
2414 skb_reserve(skb, BUF0_LEN);
2415 tmp = (u64)(unsigned long) skb->data;
2418 skb->data = (void *) (unsigned long)tmp;
2419 skb->tail = (void *) (unsigned long)tmp;
2421 if (!(((RxD3_t*)rxdp)->Buffer0_ptr))
2422 ((RxD3_t*)rxdp)->Buffer0_ptr =
2423 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2424 PCI_DMA_FROMDEVICE);
2426 pci_dma_sync_single_for_device(nic->pdev,
2427 (dma_addr_t) ((RxD3_t*)rxdp)->Buffer0_ptr,
2428 BUF0_LEN, PCI_DMA_FROMDEVICE);
2429 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2430 if (nic->rxd_mode == RXD_MODE_3B) {
2431 /* Two buffer mode */
2434 * Buffer2 will have L3/L4 header plus
2437 ((RxD3_t*)rxdp)->Buffer2_ptr = pci_map_single
2438 (nic->pdev, skb->data, dev->mtu + 4,
2439 PCI_DMA_FROMDEVICE);
2441 /* Buffer-1 will be dummy buffer. Not used */
2442 if (!(((RxD3_t*)rxdp)->Buffer1_ptr)) {
2443 ((RxD3_t*)rxdp)->Buffer1_ptr =
2444 pci_map_single(nic->pdev,
2446 PCI_DMA_FROMDEVICE);
2448 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2449 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2453 if (fill_rxd_3buf(nic, rxdp, skb) == -ENOMEM) {
2454 dev_kfree_skb_irq(skb);
2457 first_rxdp->Control_1 |=
2463 rxdp->Control_2 |= BIT(0);
2465 rxdp->Host_Control = (unsigned long) (skb);
2466 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2467 rxdp->Control_1 |= RXD_OWN_XENA;
2469 if (off == (rxd_count[nic->rxd_mode] + 1))
2471 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
2473 rxdp->Control_2 |= SET_RXD_MARKER;
2474 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2477 first_rxdp->Control_1 |= RXD_OWN_XENA;
2481 atomic_inc(&nic->rx_bufs_left[ring_no]);
2486 /* Transfer ownership of first descriptor to adapter just before
2487 * exiting. Before that, use memory barrier so that ownership
2488 * and other fields are seen by adapter correctly.
2492 first_rxdp->Control_1 |= RXD_OWN_XENA;
/*
 * free_rxd_blk - Free the skbs attached to one Rx descriptor block.
 * @sp: private member of the device structure.
 * @ring_no: ring to which the block belongs.
 * @blk: index of the Rx block inside that ring.
 * Description:
 * For every RxD in the block, the DMA mappings are released according to
 * the receive descriptor mode (RXD_MODE_1 single-buffer, RXD_MODE_3B
 * two-buffer, else the three-buffer layout) and the descriptor is zeroed.
 * rx_bufs_left for the ring is decremented for each freed buffer.
 * NOTE(review): the baked-in line numbering of this extract is
 * discontinuous; statements (e.g. the skb free and some braces/sizes)
 * are not visible here - verify against the complete source.
 */
2498 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2500 struct net_device *dev = sp->dev;
2502 struct sk_buff *skb;
2504 mac_info_t *mac_control;
2507 mac_control = &sp->mac_control;
/* Walk every descriptor of this block. */
2508 for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2509 rxdp = mac_control->rings[ring_no].
2510 rx_blocks[blk].rxds[j].virt_addr;
/* Host_Control carries the skb pointer stashed at fill time. */
2511 skb = (struct sk_buff *)
2512 ((unsigned long) rxdp->Host_Control);
/* 1-buffer mode: the single buffer holds the whole frame. */
2516 if (sp->rxd_mode == RXD_MODE_1) {
2517 pci_unmap_single(sp->pdev, (dma_addr_t)
2518 ((RxD1_t*)rxdp)->Buffer0_ptr,
2520 HEADER_ETHERNET_II_802_3_SIZE
2521 + HEADER_802_2_SIZE +
2523 PCI_DMA_FROMDEVICE);
2524 memset(rxdp, 0, sizeof(RxD1_t));
/* 2-buffer mode: unmap all three buffer pointers of the RxD3. */
2525 } else if(sp->rxd_mode == RXD_MODE_3B) {
2526 ba = &mac_control->rings[ring_no].
2528 pci_unmap_single(sp->pdev, (dma_addr_t)
2529 ((RxD3_t*)rxdp)->Buffer0_ptr,
2531 PCI_DMA_FROMDEVICE);
2532 pci_unmap_single(sp->pdev, (dma_addr_t)
2533 ((RxD3_t*)rxdp)->Buffer1_ptr,
2535 PCI_DMA_FROMDEVICE);
2536 pci_unmap_single(sp->pdev, (dma_addr_t)
2537 ((RxD3_t*)rxdp)->Buffer2_ptr,
2539 PCI_DMA_FROMDEVICE);
2540 memset(rxdp, 0, sizeof(RxD3_t));
/* Remaining (3-buffer) mode: Buffer2 is sized dev->mtu here. */
2542 pci_unmap_single(sp->pdev, (dma_addr_t)
2543 ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN,
2544 PCI_DMA_FROMDEVICE);
2545 pci_unmap_single(sp->pdev, (dma_addr_t)
2546 ((RxD3_t*)rxdp)->Buffer1_ptr,
2548 PCI_DMA_FROMDEVICE);
2549 pci_unmap_single(sp->pdev, (dma_addr_t)
2550 ((RxD3_t*)rxdp)->Buffer2_ptr, dev->mtu,
2551 PCI_DMA_FROMDEVICE);
2552 memset(rxdp, 0, sizeof(RxD3_t));
/* One buffer returned to the pool. */
2555 atomic_dec(&sp->rx_bufs_left[ring_no]);
2560 * free_rx_buffers - Frees all Rx buffers
2561 * @sp: device private variable.
2563 * This function will free all Rx buffers allocated by host.
/*
 * free_rx_buffers - Free all Rx buffers of every ring.
 * @sp: private member of the device structure.
 * Description:
 * Iterates over all configured Rx rings, frees each descriptor block via
 * free_rxd_blk(), then resets the per-ring get/put bookkeeping and the
 * rx_bufs_left counter to zero.
 * NOTE(review): buf_cnt is printed but never incremented in the visible
 * lines - presumably updated in lines missing from this extract; confirm.
 */
2568 static void free_rx_buffers(struct s2io_nic *sp)
2570 struct net_device *dev = sp->dev;
2571 int i, blk = 0, buf_cnt = 0;
2572 mac_info_t *mac_control;
2573 struct config_param *config;
2575 mac_control = &sp->mac_control;
2576 config = &sp->config;
/* Free every block of every ring. */
2578 for (i = 0; i < config->rx_ring_num; i++) {
2579 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2580 free_rxd_blk(sp,i,blk);
/* Reset ring state so a subsequent fill starts from block 0, offset 0. */
2582 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2583 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2584 mac_control->rings[i].rx_curr_put_info.offset = 0;
2585 mac_control->rings[i].rx_curr_get_info.offset = 0;
2586 atomic_set(&sp->rx_bufs_left[i], 0);
2587 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2588 dev->name, buf_cnt, i);
2593 * s2io_poll - Rx interrupt handler for NAPI support
2594 * @dev : pointer to the device structure.
2595 * @budget : The number of packets that were budgeted to be processed
2596 * during one pass through the 'Poll" function.
2598 * Comes into picture only if NAPI support has been incorporated. It does
2599 * the same thing that rx_intr_handler does, but not in an interrupt context
2600 * also It will process only a given number of packets.
2602 * 0 on success and 1 if there are No Rx packets to be processed.
/*
 * s2io_poll - Rx poll routine for the legacy NAPI (dev->poll) interface.
 * @dev: pointer to the device structure.
 * @budget: in/out packet budget supplied by the network core.
 * Description:
 * Clamps the budget to dev->quota, runs rx_intr_handler() on every ring,
 * and replenishes Rx buffers afterwards.  When the quota is exhausted the
 * early path (around line 2637) charges dev->quota without completing;
 * otherwise netif_rx_complete() is called and Rx interrupts re-enabled.
 * Return: per the old NAPI contract, 0 when done / 1 when more work
 * remains - the return statements are in lines missing from this extract.
 */
2605 static int s2io_poll(struct net_device *dev, int *budget)
2607 nic_t *nic = dev->priv;
2608 int pkt_cnt = 0, org_pkts_to_process;
2609 mac_info_t *mac_control;
2610 struct config_param *config;
2611 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2614 atomic_inc(&nic->isr_cnt);
2615 mac_control = &nic->mac_control;
2616 config = &nic->config;
/* Never process more than the device quota in one pass. */
2618 nic->pkts_to_process = *budget;
2619 if (nic->pkts_to_process > dev->quota)
2620 nic->pkts_to_process = dev->quota;
2621 org_pkts_to_process = nic->pkts_to_process;
/* Acknowledge pending Rx interrupts; readl flushes the posted write. */
2623 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
2624 readl(&bar0->rx_traffic_int);
2626 for (i = 0; i < config->rx_ring_num; i++) {
2627 rx_intr_handler(&mac_control->rings[i]);
2628 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2629 if (!nic->pkts_to_process) {
2630 /* Quota for the current iteration has been met */
2637 dev->quota -= pkt_cnt;
2639 netif_rx_complete(dev);
/* Replenish Rx descriptors consumed during this poll. */
2641 for (i = 0; i < config->rx_ring_num; i++) {
2642 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2643 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2644 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2648 /* Re enable the Rx interrupts. */
2649 writeq(0x0, &bar0->rx_traffic_mask);
2650 readl(&bar0->rx_traffic_mask);
2651 atomic_dec(&nic->isr_cnt);
/* Quota-exhausted path: charge the quota, refill, but do not complete. */
2655 dev->quota -= pkt_cnt;
2658 for (i = 0; i < config->rx_ring_num; i++) {
2659 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2660 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2661 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2665 atomic_dec(&nic->isr_cnt);
2669 #ifdef CONFIG_NET_POLL_CONTROLLER
2671 * s2io_netpoll - netpoll event handler entry point
2672 * @dev : pointer to the device structure.
2674 * This function will be called by upper layer to check for events on the
2675 * interface in situations where interrupts are disabled. It is used for
2676 * specific in-kernel networking tasks, such as remote consoles and kernel
2677 * debugging over the network (example netdump in RedHat).
/*
 * s2io_netpoll - netpoll event handler entry point.
 * @dev: pointer to the device structure.
 * Description:
 * Polls the NIC with the device IRQ disabled (for netconsole/netdump).
 * Acknowledges all Tx/Rx interrupt causes, reaps completed Tx descriptors
 * so netpoll does not exhaust skbs, delivers received frames, and then
 * refills the Rx rings.
 */
2679 static void s2io_netpoll(struct net_device *dev)
2681 nic_t *nic = dev->priv;
2682 mac_info_t *mac_control;
2683 struct config_param *config;
2684 XENA_dev_config_t __iomem *bar0 = nic->bar0;
/* All-ones mask: acknowledge every Tx and Rx interrupt cause at once. */
2685 u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2688 disable_irq(dev->irq);
2690 atomic_inc(&nic->isr_cnt);
2691 mac_control = &nic->mac_control;
2692 config = &nic->config;
2694 writeq(val64, &bar0->rx_traffic_int);
2695 writeq(val64, &bar0->tx_traffic_int);
2697 /* we need to free up the transmitted skbufs or else netpoll will
2698 * run out of skbs and will fail and eventually netpoll application such
2699 * as netdump will fail.
2701 for (i = 0; i < config->tx_fifo_num; i++)
2702 tx_intr_handler(&mac_control->fifos[i]);
2704 /* check for received packet and indicate up to network */
2705 for (i = 0; i < config->rx_ring_num; i++)
2706 rx_intr_handler(&mac_control->rings[i]);
/* Replenish descriptors consumed by the Rx pass above. */
2708 for (i = 0; i < config->rx_ring_num; i++) {
2709 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2710 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2711 DBG_PRINT(ERR_DBG, " in Rx Netpoll!!\n");
2715 atomic_dec(&nic->isr_cnt);
2716 enable_irq(dev->irq);
2722 * rx_intr_handler - Rx interrupt handler
2723 * @nic: device private variable.
2725 * If the interrupt is because of a received frame or if the
2726 * receive ring contains fresh as yet un-processed frames, this function is
2727 * called. It picks out the RxD at which place the last Rx processing had
2728 * stopped and sends the skb to the OSM's Rx handler and then increments
/*
 * rx_intr_handler - Rx interrupt handler for one ring.
 * @ring_data: per-ring state (ring_info_t) embedding the owning nic.
 * Description:
 * Resumes Rx processing where the last pass stopped: while descriptors
 * are up-to-date (RXD_IS_UP2DT), unmaps their DMA buffers according to
 * the rxd mode, hands the skb to rx_osm_handler(), and advances the
 * get-side offset/block bookkeeping.  Bails out early when the ring is
 * full, the card is going down, or the NAPI/indicate_max_pkts budget is
 * exhausted.  On exit any open LRO sessions are flushed upstream.
 * NOTE(review): several closing braces and the pkt_cnt increment are in
 * lines missing from this extract.
 */
2733 static void rx_intr_handler(ring_info_t *ring_data)
2735 nic_t *nic = ring_data->nic;
2736 struct net_device *dev = (struct net_device *) nic->dev;
2737 int get_block, put_block, put_offset;
2738 rx_curr_get_info_t get_info, put_info;
2740 struct sk_buff *skb;
2744 spin_lock(&nic->rx_lock);
/* Do not touch the ring while a reset is in progress. */
2745 if (atomic_read(&nic->card_state) == CARD_DOWN) {
2746 DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n",
2747 __FUNCTION__, dev->name);
2748 spin_unlock(&nic->rx_lock);
/* Snapshot the get/put positions for this pass. */
2752 get_info = ring_data->rx_curr_get_info;
2753 get_block = get_info.block_index;
2754 put_info = ring_data->rx_curr_put_info;
2755 put_block = put_info.block_index;
2756 rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
/* put_pos read under put_lock on one path, lockless on the other -
 * presumably selected by a config/#if in the missing lines; confirm. */
2758 spin_lock(&nic->put_lock);
2759 put_offset = ring_data->put_pos;
2760 spin_unlock(&nic->put_lock);
2762 put_offset = ring_data->put_pos;
2764 while (RXD_IS_UP2DT(rxdp)) {
2766 * If your are next to put index then it's
2767 * FIFO full condition
2769 if ((get_block == put_block) &&
2770 (get_info.offset + 1) == put_info.offset) {
2771 DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name);
2774 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2776 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2778 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2779 spin_unlock(&nic->rx_lock);
/* Unmap the frame's DMA buffers per descriptor mode before passing up. */
2782 if (nic->rxd_mode == RXD_MODE_1) {
2783 pci_unmap_single(nic->pdev, (dma_addr_t)
2784 ((RxD1_t*)rxdp)->Buffer0_ptr,
2786 HEADER_ETHERNET_II_802_3_SIZE +
2789 PCI_DMA_FROMDEVICE);
2790 } else if (nic->rxd_mode == RXD_MODE_3B) {
2791 pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
2792 ((RxD3_t*)rxdp)->Buffer0_ptr,
2793 BUF0_LEN, PCI_DMA_FROMDEVICE);
2794 pci_unmap_single(nic->pdev, (dma_addr_t)
2795 ((RxD3_t*)rxdp)->Buffer2_ptr,
2797 PCI_DMA_FROMDEVICE);
2799 pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
2800 ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN,
2801 PCI_DMA_FROMDEVICE);
2802 pci_unmap_single(nic->pdev, (dma_addr_t)
2803 ((RxD3_t*)rxdp)->Buffer1_ptr,
2805 PCI_DMA_FROMDEVICE);
2806 pci_unmap_single(nic->pdev, (dma_addr_t)
2807 ((RxD3_t*)rxdp)->Buffer2_ptr,
2808 dev->mtu, PCI_DMA_FROMDEVICE);
2810 prefetch(skb->data);
2811 rx_osm_handler(ring_data, rxdp);
/* Advance to the next descriptor, wrapping block/offset as needed. */
2813 ring_data->rx_curr_get_info.offset = get_info.offset;
2814 rxdp = ring_data->rx_blocks[get_block].
2815 rxds[get_info.offset].virt_addr;
2816 if (get_info.offset == rxd_count[nic->rxd_mode]) {
2817 get_info.offset = 0;
2818 ring_data->rx_curr_get_info.offset = get_info.offset;
2820 if (get_block == ring_data->block_count)
2822 ring_data->rx_curr_get_info.block_index = get_block;
2823 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
/* Budget accounting: stop when NAPI quota or max-indicate cap is hit. */
2826 nic->pkts_to_process -= 1;
2827 if ((napi) && (!nic->pkts_to_process))
2830 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2834 /* Clear all LRO sessions before exiting */
2835 for (i=0; i<MAX_LRO_SESSIONS; i++) {
2836 lro_t *lro = &nic->lro0_n[i];
2838 update_L3L4_header(nic, lro);
2839 queue_rx_frame(lro->parent);
2840 clear_lro_session(lro);
2845 spin_unlock(&nic->rx_lock);
2849 * tx_intr_handler - Transmit interrupt handler
2850 * @nic : device private variable
2852 * If an interrupt was raised to indicate DMA complete of the
2853 * Tx packet, this function is called. It identifies the last TxD
2854 * whose buffer was freed and frees all skbs whose data have already
2855 * DMA'ed into the NICs internal memory.
/*
 * tx_intr_handler - Transmit completion handler for one FIFO.
 * @fifo_data: per-FIFO state (fifo_info_t) embedding the owning nic.
 * Description:
 * Walks the Tx descriptor list from the get pointer while descriptors
 * are no longer owned by the adapter, logs any T_CODE errors, frees the
 * completed skbs via s2io_txdl_getskb()/dev_kfree_skb_irq(), updates
 * tx_bytes, and finally wakes the queue if it had been stopped.
 * NOTE(review): loop-advance and error-path lines are partially missing
 * from this extract.
 */
2860 static void tx_intr_handler(fifo_info_t *fifo_data)
2862 nic_t *nic = fifo_data->nic;
2863 struct net_device *dev = (struct net_device *) nic->dev;
2864 tx_curr_get_info_t get_info, put_info;
2865 struct sk_buff *skb;
2868 get_info = fifo_data->tx_curr_get_info;
2869 put_info = fifo_data->tx_curr_put_info;
2870 txdlp = (TxD_t *) fifo_data->list_info[get_info.offset].
/* Reap until we hit a descriptor still owned by the NIC or catch up
 * with the put pointer. */
2872 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2873 (get_info.offset != put_info.offset) &&
2874 (txdlp->Host_Control)) {
2875 /* Check for TxD errors */
2876 if (txdlp->Control_1 & TXD_T_CODE) {
2877 unsigned long long err;
2878 err = txdlp->Control_1 & TXD_T_CODE;
2880 nic->mac_control.stats_info->sw_stat.
/* T_CODE 0xA means the frame was dropped due to link loss. */
2883 if ((err >> 48) == 0xA) {
2884 DBG_PRINT(TX_DBG, "TxD returned due \
2885 to loss of link\n");
2888 DBG_PRINT(ERR_DBG, "***TxD error %llx\n", err);
2892 skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
2894 DBG_PRINT(ERR_DBG, "%s: Null skb ",
2896 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2900 /* Updating the statistics block */
2901 nic->stats.tx_bytes += skb->len;
2902 dev_kfree_skb_irq(skb);
/* Advance get offset, wrapping at fifo_len + 1. */
2905 if (get_info.offset == get_info.fifo_len + 1)
2906 get_info.offset = 0;
2907 txdlp = (TxD_t *) fifo_data->list_info
2908 [get_info.offset].list_virt_addr;
2909 fifo_data->tx_curr_get_info.offset =
/* Descriptors were freed, so the queue may accept frames again. */
2913 spin_lock(&nic->tx_lock);
2914 if (netif_queue_stopped(dev))
2915 netif_wake_queue(dev);
2916 spin_unlock(&nic->tx_lock);
2920 * s2io_mdio_write - Function to write in to MDIO registers
2921 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
2922 * @addr : address value
2923 * @value : data value
2924 * @dev : pointer to net_device structure
2926 * This function is used to write values to the MDIO registers
/*
 * s2io_mdio_write - Write a value to an MDIO (Clause 45 style) register.
 * @mmd_type: MMD device address (PMA/PMD/WIS/PCS/PHYXS).
 * @addr: register address within the MMD.
 * @value: 16-bit data value to write.
 * @dev: pointer to net_device structure.
 * Description:
 * Performs three transactions on bar0->mdio_control: an address cycle,
 * the write-data cycle, and a follow-up read cycle.  Each transaction is
 * kicked off by setting MDIO_CTRL_START_TRANS(0xE).
 * NOTE(review): val64 is used without a visible initialization and each
 * phase OR-accumulates into it - presumably it is reset in the lines
 * missing from this extract; confirm against the full source.
 */
2929 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
2932 nic_t *sp = dev->priv;
2933 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2935 //address transaction
2936 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2937 | MDIO_MMD_DEV_ADDR(mmd_type)
2938 | MDIO_MMS_PRT_ADDR(0x0);
2939 writeq(val64, &bar0->mdio_control);
2940 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2941 writeq(val64, &bar0->mdio_control);
/* Data transaction: latch the value with a write op-code. */
2946 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2947 | MDIO_MMD_DEV_ADDR(mmd_type)
2948 | MDIO_MMS_PRT_ADDR(0x0)
2949 | MDIO_MDIO_DATA(value)
2950 | MDIO_OP(MDIO_OP_WRITE_TRANS);
2951 writeq(val64, &bar0->mdio_control);
2952 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2953 writeq(val64, &bar0->mdio_control);
/* Read-back transaction issued after the write. */
2957 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2958 | MDIO_MMD_DEV_ADDR(mmd_type)
2959 | MDIO_MMS_PRT_ADDR(0x0)
2960 | MDIO_OP(MDIO_OP_READ_TRANS);
2961 writeq(val64, &bar0->mdio_control);
2962 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2963 writeq(val64, &bar0->mdio_control);
2969 * s2io_mdio_read - Function to read from MDIO registers
2970 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
2971 * @addr : address value
2972 * @dev : pointer to net_device structure
2974 * This function is used to read values from the MDIO registers
/*
 * s2io_mdio_read - Read a value from an MDIO (Clause 45 style) register.
 * @mmd_type: MMD device address (PMA/PMD/WIS/PCS/PHYXS).
 * @addr: register address within the MMD.
 * @dev: pointer to net_device structure.
 * Description:
 * Issues an address transaction followed by a read transaction on
 * bar0->mdio_control, then extracts the 16-bit data field from bits
 * 31:16 of the control register readback.
 * Return: the 16-bit register value (in the low bits of rval64); the
 * return statement itself is in lines missing from this extract.
 */
2977 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
2981 nic_t *sp = dev->priv;
2982 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2984 /* address transaction */
2985 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2986 | MDIO_MMD_DEV_ADDR(mmd_type)
2987 | MDIO_MMS_PRT_ADDR(0x0);
2988 writeq(val64, &bar0->mdio_control);
2989 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2990 writeq(val64, &bar0->mdio_control);
2993 /* Data transaction */
2995 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2996 | MDIO_MMD_DEV_ADDR(mmd_type)
2997 | MDIO_MMS_PRT_ADDR(0x0)
2998 | MDIO_OP(MDIO_OP_READ_TRANS);
2999 writeq(val64, &bar0->mdio_control);
3000 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3001 writeq(val64, &bar0->mdio_control);
3004 /* Read the value from regs */
3005 rval64 = readq(&bar0->mdio_control);
/* Data lives in bits 31:16 of the control register readback. */
3006 rval64 = rval64 & 0xFFFF0000;
3007 rval64 = rval64 >> 16;
3011 * s2io_chk_xpak_counter - Function to check the status of the xpak counters
3012 * @counter : counter value to be updated
3013 * @flag : flag to indicate the status
3014 * @type : counter type
3016 * This function is to check the status of the xpak counters value
/*
 * s2io_chk_xpak_counter - Update one XPAK alarm counter and its status.
 * @counter: pointer to the alarm counter to bump.
 * @regs_stat: packed 2-bit-per-alarm status word, updated in place.
 * @index: which 2-bit field of @regs_stat belongs to this alarm.
 * @flag: non-zero when the alarm condition is currently asserted.
 * @type: alarm type selector used to pick the warning message.
 * Description:
 * Increments the counter, inspects the 2-bit state at @index and, based
 * on @type, prints a "take the NIC out of service" warning for excessive
 * temperature, bias current, or laser output power.  The 2-bit state is
 * then written back (or cleared when the alarm has gone away).
 * NOTE(review): mask computation and the switch scaffolding are in lines
 * missing from this extract.
 */
3020 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
3025 for(i = 0; i <index; i++)
3030 *counter = *counter + 1;
/* Isolate this alarm's 2-bit state from the packed status word. */
3031 val64 = *regs_stat & mask;
3032 val64 = val64 >> (index * 0x2);
3039 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3040 "service. Excessive temperatures may "
3041 "result in premature transceiver "
3045 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3046 "service Excessive bias currents may "
3047 "indicate imminent laser diode "
3051 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3052 "service Excessive laser output "
3053 "power may saturate far-end "
3057 DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
/* Store the updated 2-bit state back into the packed word. */
3062 val64 = val64 << (index * 0x2);
3063 *regs_stat = (*regs_stat & (~mask)) | (val64);
/* Alarm not asserted: clear this alarm's state bits. */
3066 *regs_stat = *regs_stat & (~mask);
3071 * s2io_updt_xpak_counter - Function to update the xpak counters
3072 * @dev : pointer to net_device struct
3074 * This function is to update the status of the xpak counters value
/*
 * s2io_updt_xpak_counter - Poll XPAK transceiver alarm/warning flags.
 * @dev: pointer to net_device struct.
 * Description:
 * Verifies MDIO connectivity to the PMA device (expects 0x2040 at
 * address 0x0000), loads the DOM register, then reads the alarm and
 * warning flag registers and bumps the matching xpak_stat counters.
 * High-threshold alarms go through s2io_chk_xpak_counter() so the
 * packed xpak_regs_stat word is maintained; low-threshold alarms and
 * all warnings are counted directly.
 */
3077 static void s2io_updt_xpak_counter(struct net_device *dev)
3085 nic_t *sp = dev->priv;
3086 StatInfo_t *stat_info = sp->mac_control.stats_info;
3088 /* Check the communication with the MDIO slave */
3091 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
/* All-ones or all-zeroes readback means the slave did not respond. */
3092 if((val64 == 0xFFFF) || (val64 == 0x0000))
3094 DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
3095 "Returned %llx\n", (unsigned long long)val64);
3099 /* Check for the expected value of 2040 at PMA address 0x0000 */
3102 DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
3103 DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
3104 (unsigned long long)val64);
3108 /* Loading the DOM register to MDIO register */
3110 s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
3111 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3113 /* Reading the Alarm flags */
3116 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
/* Bit 7: transceiver temperature high alarm. */
3118 flag = CHECKBIT(val64, 0x7);
3120 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
3121 &stat_info->xpak_stat.xpak_regs_stat,
3124 if(CHECKBIT(val64, 0x6))
3125 stat_info->xpak_stat.alarm_transceiver_temp_low++;
/* Bit 3: laser bias current high alarm. */
3127 flag = CHECKBIT(val64, 0x3);
3129 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
3130 &stat_info->xpak_stat.xpak_regs_stat,
3133 if(CHECKBIT(val64, 0x2))
3134 stat_info->xpak_stat.alarm_laser_bias_current_low++;
/* Bit 1: laser output power high alarm. */
3136 flag = CHECKBIT(val64, 0x1);
3138 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
3139 &stat_info->xpak_stat.xpak_regs_stat,
3142 if(CHECKBIT(val64, 0x0))
3143 stat_info->xpak_stat.alarm_laser_output_power_low++;
3145 /* Reading the Warning flags */
3148 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
/* Warnings are counted directly; same bit layout as the alarms. */
3150 if(CHECKBIT(val64, 0x7))
3151 stat_info->xpak_stat.warn_transceiver_temp_high++;
3153 if(CHECKBIT(val64, 0x6))
3154 stat_info->xpak_stat.warn_transceiver_temp_low++;
3156 if(CHECKBIT(val64, 0x3))
3157 stat_info->xpak_stat.warn_laser_bias_current_high++;
3159 if(CHECKBIT(val64, 0x2))
3160 stat_info->xpak_stat.warn_laser_bias_current_low++;
3162 if(CHECKBIT(val64, 0x1))
3163 stat_info->xpak_stat.warn_laser_output_power_high++;
3165 if(CHECKBIT(val64, 0x0))
3166 stat_info->xpak_stat.warn_laser_output_power_low++;
3170 * alarm_intr_handler - Alarm Interrupt handler
3171 * @nic: device private variable
3172 * Description: If the interrupt was neither because of Rx packet or Tx
3173 * complete, this function is called. If the interrupt was to indicate
3174 * a loss of link, the OSM link status handler is invoked for any other
3175 * alarm interrupt the block that raised the interrupt is displayed
3176 * and a H/W reset is issued.
/*
 * alarm_intr_handler - Handle alarm (non Tx/Rx) interrupts.
 * @nic: device private variable.
 * Description:
 * Services the miscellaneous alarm sources: periodic XPAK counter
 * updates, RMAC link-state changes, memory-controller ECC errors,
 * serious errors (SERR), PCC flow-buffer double ECC errors (recycle the
 * adapter per errata), GPIO data-parity errors, and - on Xframe II -
 * the ring bump (ring full) counters.  Unrecoverable conditions stop
 * the queue and schedule rst_timer_task for a soft reset.
 */
3181 static void alarm_intr_handler(struct s2io_nic *nic)
3183 struct net_device *dev = (struct net_device *) nic->dev;
3184 XENA_dev_config_t __iomem *bar0 = nic->bar0;
3185 register u64 val64 = 0, err_reg = 0;
3188 nic->mac_control.stats_info->sw_stat.ring_full_cnt = 0;
3189 /* Handling the XPAK counters update */
/* Only poll the XPAK transceiver roughly once per hour (72000 ticks). */
3190 if(nic->mac_control.stats_info->xpak_stat.xpak_timer_count < 72000) {
3191 /* waiting for an hour */
3192 nic->mac_control.stats_info->xpak_stat.xpak_timer_count++;
3194 s2io_updt_xpak_counter(dev);
3195 /* reset the count to zero */
3196 nic->mac_control.stats_info->xpak_stat.xpak_timer_count = 0;
3199 /* Handling link status change error Intr */
3200 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
/* Write-one-to-clear the RMAC error causes we just read. */
3201 err_reg = readq(&bar0->mac_rmac_err_reg);
3202 writeq(err_reg, &bar0->mac_rmac_err_reg);
3203 if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
3204 schedule_work(&nic->set_link_task);
3208 /* Handling Ecc errors */
3209 val64 = readq(&bar0->mc_err_reg);
3210 writeq(val64, &bar0->mc_err_reg);
3211 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
3212 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
3213 nic->mac_control.stats_info->sw_stat.
3215 DBG_PRINT(INIT_DBG, "%s: Device indicates ",
3217 DBG_PRINT(INIT_DBG, "double ECC error!!\n");
3218 if (nic->device_type != XFRAME_II_DEVICE) {
3219 /* Reset XframeI only if critical error */
3220 if (val64 & (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
3221 MC_ERR_REG_MIRI_ECC_DB_ERR_1)) {
3222 netif_stop_queue(dev);
3223 schedule_work(&nic->rst_timer_task);
3224 nic->mac_control.stats_info->sw_stat.
3229 nic->mac_control.stats_info->sw_stat.
3234 /* In case of a serious error, the device will be Reset. */
3235 val64 = readq(&bar0->serr_source);
3236 if (val64 & SERR_SOURCE_ANY) {
3237 nic->mac_control.stats_info->sw_stat.serious_err_cnt++;
3238 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
3239 DBG_PRINT(ERR_DBG, "serious error %llx!!\n",
3240 (unsigned long long)val64);
3241 netif_stop_queue(dev);
3242 schedule_work(&nic->rst_timer_task);
3243 nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
3247 * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
3248 * Error occurs, the adapter will be recycled by disabling the
3249 * adapter enable bit and enabling it again after the device
3250 * becomes Quiescent.
3252 val64 = readq(&bar0->pcc_err_reg);
3253 writeq(val64, &bar0->pcc_err_reg);
3254 if (val64 & PCC_FB_ECC_DB_ERR) {
/* Disable the adapter; set_link_task re-enables once quiescent. */
3255 u64 ac = readq(&bar0->adapter_control);
3256 ac &= ~(ADAPTER_CNTL_EN);
3257 writeq(ac, &bar0->adapter_control);
3258 ac = readq(&bar0->adapter_control);
3259 schedule_work(&nic->set_link_task);
3261 /* Check for data parity error */
3262 val64 = readq(&bar0->pic_int_status);
3263 if (val64 & PIC_INT_GPIO) {
3264 val64 = readq(&bar0->gpio_int_reg);
3265 if (val64 & GPIO_INT_REG_DP_ERR_INT) {
3266 nic->mac_control.stats_info->sw_stat.parity_err_cnt++;
3267 schedule_work(&nic->rst_timer_task);
3268 nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
3272 /* Check for ring full counter */
3273 if (nic->device_type & XFRAME_II_DEVICE) {
/* Each bump counter register packs four 16-bit per-ring counts. */
3274 val64 = readq(&bar0->ring_bump_counter1);
3275 for (i=0; i<4; i++) {
3276 cnt = ( val64 & vBIT(0xFFFF,(i*16),16));
3277 cnt >>= 64 - ((i+1)*16);
3278 nic->mac_control.stats_info->sw_stat.ring_full_cnt
3282 val64 = readq(&bar0->ring_bump_counter2);
3283 for (i=0; i<4; i++) {
3284 cnt = ( val64 & vBIT(0xFFFF,(i*16),16));
3285 cnt >>= 64 - ((i+1)*16);
3286 nic->mac_control.stats_info->sw_stat.ring_full_cnt
3291 /* Other type of interrupts are not being handled now, TODO */
3295 * wait_for_cmd_complete - waits for a command to complete.
3296 * @sp : private member of the device structure, which is a pointer to the
3297 * s2io_nic structure.
3298 * Description: Function that waits for a command to Write into RMAC
3299 * ADDR DATA registers to be completed and returns either success or
3300 * error depending on whether the command was complete or not.
3302 * SUCCESS on success and FAILURE on failure.
/*
 * wait_for_cmd_complete - Poll a register until a busy bit clears.
 * @addr: iomem address of the register to poll.
 * @busy_bit: bit mask that is set while the command is in flight.
 * Description:
 * Reads @addr and checks @busy_bit; ret starts at FAILURE and is
 * presumably set to SUCCESS when the bit clears - the loop body,
 * delay/retry logic and return are in lines missing from this extract.
 * Return: SUCCESS or FAILURE.
 */
3305 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit)
3307 int ret = FAILURE, cnt = 0;
3311 val64 = readq(addr);
3312 if (!(val64 & busy_bit)) {
3328 * check_pci_device_id - Checks if the device id is supported
3330 * Description: Function to check if the pci device id is supported by driver.
3331 * Return value: Actual device id if supported else PCI_ANY_ID
/*
 * check_pci_device_id - Check whether a PCI device id is supported.
 * @id: PCI device id read from config space.
 * Return: XFRAME_II_DEVICE for Herc ids, XFRAME_I_DEVICE for the
 * original S2IO ids; the default (PCI_ANY_ID) arm and the switch
 * statement line itself are in lines missing from this extract.
 */
3333 static u16 check_pci_device_id(u16 id)
3336 case PCI_DEVICE_ID_HERC_WIN:
3337 case PCI_DEVICE_ID_HERC_UNI:
3338 return XFRAME_II_DEVICE;
3339 case PCI_DEVICE_ID_S2IO_UNI:
3340 case PCI_DEVICE_ID_S2IO_WIN:
3341 return XFRAME_I_DEVICE;
3348 * s2io_reset - Resets the card.
3349 * @sp : private member of the device structure.
3350 * Description: Function to Reset the card. This function then also
3351 * restores the previously saved PCI configuration space registers as
3352 * the card reset also resets the configuration space.
/*
 * s2io_reset - Reset the card and restore its PCI config space.
 * @sp: private member of the device structure.
 * Description:
 * Backs up the PCI-X command register, resets the chip (a PME D3->D0
 * cycle on Xframe II, then SW_RESET_ALL via bar0->sw_reset), and retries
 * restoring the saved PCI config space until the device id reads back
 * as a supported one.  Afterwards it re-programs the swapper, restores
 * MSI-X table entries, clears parity/ECC status left by the reset,
 * zeroes the OS statistics, and applies the SXE-002 LED workaround on
 * newer Xframe I subsystems.
 */
3357 static void s2io_reset(nic_t * sp)
3359 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3364 DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
3365 __FUNCTION__, sp->dev->name);
3367 /* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
3368 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
/* Xframe II: bounce power state (D3 then D0) for a PME-based reset. */
3370 if (sp->device_type == XFRAME_II_DEVICE) {
3372 ret = pci_set_power_state(sp->pdev, 3);
3374 ret = pci_set_power_state(sp->pdev, 0);
3376 DBG_PRINT(ERR_DBG,"%s PME based SW_Reset failed!\n",
3384 val64 = SW_RESET_ALL;
3385 writeq(val64, &bar0->sw_reset);
3387 if (strstr(sp->product_name, "CX4")) {
/* Retry the config-space restore until the device id reads back. */
3391 for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
3393 /* Restore the PCI state saved during initialization. */
3394 pci_restore_state(sp->pdev);
3395 pci_read_config_word(sp->pdev, 0x2, &val16);
3396 if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3401 if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
3402 DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__);
3405 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
3409 /* Set swapper to enable I/O register access */
3410 s2io_set_swapper(sp);
3412 /* Restore the MSIX table entries from local variables */
3413 restore_xmsi_data(sp);
3415 /* Clear certain PCI/PCI-X fields after reset */
3416 if (sp->device_type == XFRAME_II_DEVICE) {
3417 /* Clear "detected parity error" bit */
3418 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
3420 /* Clearing PCIX Ecc status register */
3421 pci_write_config_dword(sp->pdev, 0x68, 0x7C);
3423 /* Clearing PCI_STATUS error reflected here */
3424 writeq(BIT(62), &bar0->txpic_int_reg);
3427 /* Reset device statistics maintained by OS */
3428 memset(&sp->stats, 0, sizeof (struct net_device_stats));
3430 /* SXE-002: Configure link and activity LED to turn it off */
3431 subid = sp->pdev->subsystem_device;
3432 if (((subid & 0xFF) >= 0x07) &&
3433 (sp->device_type == XFRAME_I_DEVICE)) {
3434 val64 = readq(&bar0->gpio_control);
3435 val64 |= 0x0000800000000000ULL;
3436 writeq(val64, &bar0->gpio_control);
3437 val64 = 0x0411040400000000ULL;
3438 writeq(val64, (void __iomem *)bar0 + 0x2700);
3442 * Clear spurious ECC interrupts that would have occured on
3443 * XFRAME II cards after reset.
3445 if (sp->device_type == XFRAME_II_DEVICE) {
3446 val64 = readq(&bar0->pcc_err_reg);
3447 writeq(val64, &bar0->pcc_err_reg);
3450 sp->device_enabled_once = FALSE;
3454 * s2io_set_swapper - to set the swapper control on the card
3455 * @sp : private member of the device structure,
3456 * pointer to the s2io_nic structure.
3457 * Description: Function to set the swapper control on the card
3458 * correctly depending on the 'endianness' of the system.
3460 * SUCCESS on success and FAILURE on failure.
/*
 * s2io_set_swapper - Program the byte-swapper for the host endianness.
 * @sp: private member of the device structure.
 * Description:
 * The chip's PIF feedback register must read 0x0123456789ABCDEF once the
 * swapper is set correctly.  The function tries candidate read-side
 * swapper settings until the feedback matches, then validates the write
 * side via a round trip through xmsi_address, and finally programs the
 * full set of FE/SE swap-enable bits (XMSI_SE only for INTA) and
 * re-verifies the feedback register.
 * Return: SUCCESS or FAILURE (the return statements are in lines missing
 * from this extract).
 */
3463 static int s2io_set_swapper(nic_t * sp)
3465 struct net_device *dev = sp->dev;
3466 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3467 u64 val64, valt, valr;
3470 * Set proper endian settings and verify the same by reading
3471 * the PIF Feed-back register.
3474 val64 = readq(&bar0->pif_rd_swapper_fb);
3475 if (val64 != 0x0123456789ABCDEFULL) {
/* Try each candidate read-swap setting until the magic reads back. */
3477 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
3478 0x8100008181000081ULL, /* FE=1, SE=0 */
3479 0x4200004242000042ULL, /* FE=0, SE=1 */
3480 0}; /* FE=0, SE=0 */
3483 writeq(value[i], &bar0->swapper_ctrl);
3484 val64 = readq(&bar0->pif_rd_swapper_fb);
3485 if (val64 == 0x0123456789ABCDEFULL)
3490 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3492 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3493 (unsigned long long) val64);
3498 valr = readq(&bar0->swapper_ctrl);
/* Validate the write path: round-trip a known value via xmsi_address. */
3501 valt = 0x0123456789ABCDEFULL;
3502 writeq(valt, &bar0->xmsi_address);
3503 val64 = readq(&bar0->xmsi_address);
3507 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
3508 0x0081810000818100ULL, /* FE=1, SE=0 */
3509 0x0042420000424200ULL, /* FE=0, SE=1 */
3510 0}; /* FE=0, SE=0 */
3513 writeq((value[i] | valr), &bar0->swapper_ctrl);
3514 writeq(valt, &bar0->xmsi_address);
3515 val64 = readq(&bar0->xmsi_address);
3521 unsigned long long x = val64;
3522 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
3523 DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
/* Keep only the top 16 bits; the swap-enable bits are rebuilt below. */
3527 val64 = readq(&bar0->swapper_ctrl);
3528 val64 &= 0xFFFF000000000000ULL;
3532 * The device by default set to a big endian format, so a
3533 * big endian driver need not set anything.
3535 val64 |= (SWAPPER_CTRL_TXP_FE |
3536 SWAPPER_CTRL_TXP_SE |
3537 SWAPPER_CTRL_TXD_R_FE |
3538 SWAPPER_CTRL_TXD_W_FE |
3539 SWAPPER_CTRL_TXF_R_FE |
3540 SWAPPER_CTRL_RXD_R_FE |
3541 SWAPPER_CTRL_RXD_W_FE |
3542 SWAPPER_CTRL_RXF_W_FE |
3543 SWAPPER_CTRL_XMSI_FE |
3544 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
/* XMSI swap on the write side only when using legacy INTA. */
3545 if (sp->intr_type == INTA)
3546 val64 |= SWAPPER_CTRL_XMSI_SE;
3547 writeq(val64, &bar0->swapper_ctrl);
3550 * Initially we enable all bits to make it accessible by the
3551 * driver, then we selectively enable only those bits that
3554 val64 |= (SWAPPER_CTRL_TXP_FE |
3555 SWAPPER_CTRL_TXP_SE |
3556 SWAPPER_CTRL_TXD_R_FE |
3557 SWAPPER_CTRL_TXD_R_SE |
3558 SWAPPER_CTRL_TXD_W_FE |
3559 SWAPPER_CTRL_TXD_W_SE |
3560 SWAPPER_CTRL_TXF_R_FE |
3561 SWAPPER_CTRL_RXD_R_FE |
3562 SWAPPER_CTRL_RXD_R_SE |
3563 SWAPPER_CTRL_RXD_W_FE |
3564 SWAPPER_CTRL_RXD_W_SE |
3565 SWAPPER_CTRL_RXF_W_FE |
3566 SWAPPER_CTRL_XMSI_FE |
3567 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3568 if (sp->intr_type == INTA)
3569 val64 |= SWAPPER_CTRL_XMSI_SE;
3570 writeq(val64, &bar0->swapper_ctrl);
3572 val64 = readq(&bar0->swapper_ctrl);
3575 * Verifying if endian settings are accurate by reading a
3576 * feedback register.
3578 val64 = readq(&bar0->pif_rd_swapper_fb);
3579 if (val64 != 0x0123456789ABCDEFULL) {
3580 /* Endian settings are incorrect, calls for another dekko. */
3581 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3583 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3584 (unsigned long long) val64);
3591 static int wait_for_msix_trans(nic_t *nic, int i)
3593 XENA_dev_config_t __iomem *bar0 = nic->bar0;
3595 int ret = 0, cnt = 0;
3598 val64 = readq(&bar0->xmsi_access);
3599 if (!(val64 & BIT(15)))