/************************************************************************
 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
 * Copyright(c) 2002-2005 Neterion Inc.
 *
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice. This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * Credits:
 * Jeff Garzik : For pointing out the improper error condition
 *     check in the s2io_xmit routine and also some
 *     issues in the Tx watch dog function. Also for
 *     patiently answering all those innumerable
 *     questions regarding the 2.6 porting issues.
 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
 *     macros available only in the 2.6 kernel.
 * Francois Romieu : For pointing out all the code parts that were
 *     deprecated and also styling related comments.
 * Grant Grundler : For helping me get rid of some architecture
 *     dependent code.
 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
 *
 * The module loadable parameters that are supported by the driver and a brief
 * explanation of all the variables:
 *
 * rx_ring_num : This can be used to program the number of receive rings used
 *     in the driver.
 * rx_ring_sz: This defines the number of receive blocks each ring can have.
 *     This is also an array of size 8.
 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
 *     values are 1, 2 and 3.
 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
 * tx_fifo_len: This is also an array of 8. Each element defines the number of
 *     Tx descriptors that can be associated with each corresponding FIFO.
 * intr_type: This defines the type of interrupt. The values can be 0 (INTA),
 *     1 (MSI) or 2 (MSI_X). The default value is 0 (INTA).
 * lro: Specifies whether to enable Large Receive Offload (LRO) or not.
 *     Possible values are '1' to enable and '0' to disable. The default is '0'.
 * lro_max_pkts: This parameter defines the maximum number of packets that can
 *     be aggregated as a single large packet.
 ************************************************************************/
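/*
 * Illustrative usage only (the parameter values below are examples, not
 * recommendations): loading the driver with two Tx FIFOs, two Rx rings
 * and MSI-X interrupts might look like
 *
 *     modprobe s2io tx_fifo_num=2 rx_ring_num=2 intr_type=2
 *
 * Any parameter left unspecified keeps the default documented above.
 */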
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/stddef.h>
#include <linux/ioctl.h>
#include <linux/timex.h>
#include <linux/ethtool.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/div64.h>

#include "s2io-regs.h"
#define DRV_VERSION "2.0.16.1"

/* S2io Driver name & version. */
static char s2io_driver_name[] = "Neterion";
static char s2io_driver_version[] = DRV_VERSION;
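/*
 * Per-RxD size in bytes and usable RxDs per receive block, indexed by
 * rxd_mode. Each block gives up one descriptor's worth of space for the
 * next-block link (see the pkt_cnt/block_count arithmetic in
 * init_shared_mem() below).
 */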
static int rxd_size[4] = {32, 48, 48, 64};
static int rxd_count[4] = {127, 85, 85, 63};
static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
{
    int ret;

    ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
           (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));

    return ret;
}
/*
 * Cards with the following subsystem_ids have a link state indication
 * problem: 600B, 600C, 600D, 640B, 640C and 640D.
 * The macro below identifies these cards given the subsystem_id.
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
    (dev_type == XFRAME_I_DEVICE) ? \
        ((((subid >= 0x600B) && (subid <= 0x600D)) || \
          ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0

#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
                                      ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
static inline int rx_buffer_level(struct s2io_nic *sp, int rxb_size, int ring)
{
    struct mac_info *mac_control;

    mac_control = &sp->mac_control;
    if (rxb_size <= rxd_count[sp->rxd_mode])
        return PANIC;
    else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16)
        return LOW;
    return 0;
}
/* Ethtool related variables and Macros. */
static char s2io_gstrings[][ETH_GSTRING_LEN] = {
    "Register test\t(offline)",
    "Eeprom test\t(offline)",
    "Link test\t(online)",
    "RLDRAM test\t(offline)",
    "BIST Test\t(offline)"
};
static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
    {"tmac_data_octets"},
    {"tmac_pause_ctrl_frms"},
    {"tmac_any_err_frms"},
    {"tmac_ttl_less_fb_octets"},
    {"tmac_vld_ip_octets"},
    {"rmac_data_octets"},
    {"rmac_fcs_err_frms"},
    {"rmac_vld_mcst_frms"},
    {"rmac_vld_bcst_frms"},
    {"rmac_in_rng_len_err_frms"},
    {"rmac_out_rng_len_err_frms"},
    {"rmac_pause_ctrl_frms"},
    {"rmac_unsup_ctrl_frms"},
    {"rmac_accepted_ucst_frms"},
    {"rmac_accepted_nucst_frms"},
    {"rmac_discarded_frms"},
    {"rmac_drop_events"},
    {"rmac_ttl_less_fb_octets"},
    {"rmac_usized_frms"},
    {"rmac_osized_frms"},
    {"rmac_jabber_frms"},
    {"rmac_ttl_64_frms"},
    {"rmac_ttl_65_127_frms"},
    {"rmac_ttl_128_255_frms"},
    {"rmac_ttl_256_511_frms"},
    {"rmac_ttl_512_1023_frms"},
    {"rmac_ttl_1024_1518_frms"},
    {"rmac_err_drp_udp"},
    {"rmac_xgmii_err_sym"},
    {"rmac_xgmii_data_err_cnt"},
    {"rmac_xgmii_ctrl_err_cnt"},
    {"rmac_accepted_ip"},
    {"new_rd_req_rtry_cnt"},
    {"wr_rtry_rd_ack_cnt"},
    {"new_wr_req_rtry_cnt"},
    {"rd_rtry_wr_ack_cnt"},
    {"rmac_ttl_1519_4095_frms"},
    {"rmac_ttl_4096_8191_frms"},
    {"rmac_ttl_8192_max_frms"},
    {"rmac_ttl_gt_max_frms"},
    {"rmac_osized_alt_frms"},
    {"rmac_jabber_alt_frms"},
    {"rmac_gt_max_alt_frms"},
    {"rmac_len_discard"},
    {"rmac_fcs_discard"},
    {"rmac_red_discard"},
    {"rmac_rts_discard"},
    {"rmac_ingm_full_discard"},
    {"\n DRIVER STATISTICS"},
    {"single_bit_ecc_errs"},
    {"double_bit_ecc_errs"},
    {"alarm_transceiver_temp_high"},
    {"alarm_transceiver_temp_low"},
    {"alarm_laser_bias_current_high"},
    {"alarm_laser_bias_current_low"},
    {"alarm_laser_output_power_high"},
    {"alarm_laser_output_power_low"},
    {"warn_transceiver_temp_high"},
    {"warn_transceiver_temp_low"},
    {"warn_laser_bias_current_high"},
    {"warn_laser_bias_current_low"},
    {"warn_laser_output_power_high"},
    {"warn_laser_output_power_low"},
    {"lro_aggregated_pkts"},
    {"lro_flush_both_count"},
    {"lro_out_of_sequence_pkts"},
    {"lro_flush_due_to_max_pkts"},
    {"lro_avg_aggr_pkts"},
};
#define S2IO_STAT_LEN (sizeof(ethtool_stats_keys) / ETH_GSTRING_LEN)
#define S2IO_STAT_STRINGS_LEN (S2IO_STAT_LEN * ETH_GSTRING_LEN)

#define S2IO_TEST_LEN (sizeof(s2io_gstrings) / ETH_GSTRING_LEN)
#define S2IO_STRINGS_LEN (S2IO_TEST_LEN * ETH_GSTRING_LEN)
#define S2IO_TIMER_CONF(timer, handle, arg, exp)    \
    do {                                            \
        init_timer(&timer);                         \
        timer.function = handle;                    \
        timer.data = (unsigned long)arg;            \
        mod_timer(&timer, (jiffies + exp));         \
    } while (0)
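/*
 * Illustrative use (the argument values here are an example only):
 *
 *     S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ / 2));
 *
 * The do { ... } while (0) wrapper lets the macro behave as a single
 * statement, e.g. inside an unbraced if/else.
 */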
static void s2io_vlan_rx_register(struct net_device *dev,
                                  struct vlan_group *grp)
{
    struct s2io_nic *nic = dev->priv;
    unsigned long flags;

    spin_lock_irqsave(&nic->tx_lock, flags);
    nic->vlgrp = grp;
    spin_unlock_irqrestore(&nic->tx_lock, flags);
}
/* Unregister the vlan */
static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
{
    struct s2io_nic *nic = dev->priv;
    unsigned long flags;

    spin_lock_irqsave(&nic->tx_lock, flags);
    if (nic->vlgrp)
        nic->vlgrp->vlan_devices[vid] = NULL;
    spin_unlock_irqrestore(&nic->tx_lock, flags);
}
/*
 * Constants to be programmed into the Xena's registers, to configure
 * the XAUI.
 */
static const u64 herc_act_dtx_cfg[] = {
    0x8000051536750000ULL, 0x80000515367500E0ULL,
    0x8000051536750004ULL, 0x80000515367500E4ULL,
    0x80010515003F0000ULL, 0x80010515003F00E0ULL,
    0x80010515003F0004ULL, 0x80010515003F00E4ULL,
    0x801205150D440000ULL, 0x801205150D4400E0ULL,
    0x801205150D440004ULL, 0x801205150D4400E4ULL,
    0x80020515F2100000ULL, 0x80020515F21000E0ULL,
    0x80020515F2100004ULL, 0x80020515F21000E4ULL,
    END_SIGN
};

static const u64 xena_dtx_cfg[] = {
    0x8000051500000000ULL, 0x80000515000000E0ULL,
    0x80000515D9350004ULL, 0x80000515D93500E4ULL,
    0x8001051500000000ULL, 0x80010515000000E0ULL,
    0x80010515001E0004ULL, 0x80010515001E00E4ULL,
    0x8002051500000000ULL, 0x80020515000000E0ULL,
    0x80020515F2100004ULL, 0x80020515F21000E4ULL,
    END_SIGN
};
/*
 * Constants for fixing the MAC address problem seen mostly on
 * Alpha machines.
 */
static const u64 fix_mac[] = {
    0x0060000000000000ULL, 0x0060600000000000ULL,
    0x0040600000000000ULL, 0x0000600000000000ULL,
    0x0020600000000000ULL, 0x0060600000000000ULL,
    0x0020600000000000ULL, 0x0060600000000000ULL,
    0x0020600000000000ULL, 0x0060600000000000ULL,
    0x0020600000000000ULL, 0x0060600000000000ULL,
    0x0020600000000000ULL, 0x0060600000000000ULL,
    0x0020600000000000ULL, 0x0060600000000000ULL,
    0x0020600000000000ULL, 0x0060600000000000ULL,
    0x0020600000000000ULL, 0x0060600000000000ULL,
    0x0020600000000000ULL, 0x0060600000000000ULL,
    0x0020600000000000ULL, 0x0060600000000000ULL,
    0x0020600000000000ULL, 0x0000600000000000ULL,
    0x0040600000000000ULL, 0x0060600000000000ULL,
    0x0020600000000000ULL,
    END_SIGN
};
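/*
 * These values are replayed verbatim into the gpio_control register by
 * fix_mac_address() below, as a workaround for the MAC address read
 * problem described above.
 */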
MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
/* Module Loadable parameters. */
S2IO_PARM_INT(tx_fifo_num, 1);
S2IO_PARM_INT(rx_ring_num, 1);
S2IO_PARM_INT(rx_ring_mode, 1);
S2IO_PARM_INT(use_continuous_tx_intrs, 1);
S2IO_PARM_INT(rmac_pause_time, 0x100);
S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
S2IO_PARM_INT(shared_splits, 0);
S2IO_PARM_INT(tmac_util_period, 5);
S2IO_PARM_INT(rmac_util_period, 5);
S2IO_PARM_INT(bimodal, 0);
S2IO_PARM_INT(l3l4hdr_size, 128);
/* Frequency of Rx desc syncs expressed as power of 2 */
S2IO_PARM_INT(rxsync_frequency, 3);
/* Interrupt type. Values can be 0 (INTA), 1 (MSI), 2 (MSI_X) */
S2IO_PARM_INT(intr_type, 0);
/* Large receive offload feature */
S2IO_PARM_INT(lro, 0);
/* Max pkts to be aggregated by LRO at one time. If not specified,
 * aggregation happens until we hit max IP pkt size (64K).
 */
S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
S2IO_PARM_INT(indicate_max_pkts, 0);
S2IO_PARM_INT(napi, 1);
S2IO_PARM_INT(ufo, 0);
static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
    {DEFAULT_FIFO_0_LEN, [1 ... (MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
    {[0 ... (MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
static unsigned int rts_frm_len[MAX_RX_RINGS] =
    {[0 ... (MAX_RX_RINGS - 1)] = 0};

module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);
/*
 * This table lists all the devices that this driver supports.
 */
static struct pci_device_id s2io_tbl[] __devinitdata = {
    {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
     PCI_ANY_ID, PCI_ANY_ID},
    {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
     PCI_ANY_ID, PCI_ANY_ID},
    {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
     PCI_ANY_ID, PCI_ANY_ID},
    {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
     PCI_ANY_ID, PCI_ANY_ID},
    {0,}
};

MODULE_DEVICE_TABLE(pci, s2io_tbl);

static struct pci_driver s2io_driver = {
    .name = "S2IO",
    .id_table = s2io_tbl,
    .probe = s2io_init_nic,
    .remove = __devexit_p(s2io_rem_nic),
};
/* A simplifier macro used both by init and free shared_mem Fns(). */
#define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
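/*
 * This is the usual ceiling-division idiom: for example,
 * TXD_MEM_PAGE_CNT(100, 32) evaluates to (100 + 31) / 32 == 4 pages.
 */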
/**
 * init_shared_mem - Allocation and Initialization of Memory
 * @nic: Device private variable.
 * Description: The function allocates all the memory areas shared
 * between the NIC and the driver. This includes Tx descriptors,
 * Rx descriptors and the statistics block.
 */

static int init_shared_mem(struct s2io_nic *nic)
{
    u32 size;
    void *tmp_v_addr, *tmp_v_addr_next;
    dma_addr_t tmp_p_addr, tmp_p_addr_next;
    struct RxD_block *pre_rxd_blk = NULL;
    int i, j, blk_cnt;
    int lst_size, lst_per_page;
    struct net_device *dev = nic->dev;
    unsigned long tmp;
    struct buffAdd *ba;

    struct mac_info *mac_control;
    struct config_param *config;

    mac_control = &nic->mac_control;
    config = &nic->config;
    /* Allocation and initialization of TXDLs in FIFOs */
    size = 0;
    for (i = 0; i < config->tx_fifo_num; i++) {
        size += config->tx_cfg[i].fifo_len;
    }
    if (size > MAX_AVAILABLE_TXDS) {
        DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
        DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
        return -EINVAL;
    }

    lst_size = (sizeof(struct TxD) * config->max_txds);
    lst_per_page = PAGE_SIZE / lst_size;

    for (i = 0; i < config->tx_fifo_num; i++) {
        int fifo_len = config->tx_cfg[i].fifo_len;
        int list_holder_size = fifo_len * sizeof(struct list_info_hold);
        mac_control->fifos[i].list_info = kmalloc(list_holder_size,
                                                  GFP_KERNEL);
        if (!mac_control->fifos[i].list_info) {
            DBG_PRINT(ERR_DBG,
                      "Malloc failed for list_info\n");
            return -ENOMEM;
        }
        memset(mac_control->fifos[i].list_info, 0, list_holder_size);
    }
    for (i = 0; i < config->tx_fifo_num; i++) {
        int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
                                        lst_per_page);
        mac_control->fifos[i].tx_curr_put_info.offset = 0;
        mac_control->fifos[i].tx_curr_put_info.fifo_len =
            config->tx_cfg[i].fifo_len - 1;
        mac_control->fifos[i].tx_curr_get_info.offset = 0;
        mac_control->fifos[i].tx_curr_get_info.fifo_len =
            config->tx_cfg[i].fifo_len - 1;
        mac_control->fifos[i].fifo_no = i;
        mac_control->fifos[i].nic = nic;
        mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;

        for (j = 0; j < page_num; j++) {
            int k = 0;
            dma_addr_t tmp_p;
            void *tmp_v;

            tmp_v = pci_alloc_consistent(nic->pdev,
                                         PAGE_SIZE, &tmp_p);
            if (!tmp_v) {
                DBG_PRINT(ERR_DBG,
                          "pci_alloc_consistent ");
                DBG_PRINT(ERR_DBG, "failed for TxDL\n");
                return -ENOMEM;
            }
            /* If we got a zero DMA address (can happen on
             * certain platforms like PPC), reallocate.
             * Store the virtual address of the page we don't want,
             * to be freed later.
             */
            if (!tmp_p) {
                mac_control->zerodma_virt_addr = tmp_v;
                DBG_PRINT(INIT_DBG,
                          "%s: Zero DMA address for TxDL. ", dev->name);
                DBG_PRINT(INIT_DBG,
                          "Virtual address %p\n", tmp_v);
                tmp_v = pci_alloc_consistent(nic->pdev,
                                             PAGE_SIZE, &tmp_p);
                if (!tmp_v) {
                    DBG_PRINT(ERR_DBG,
                              "pci_alloc_consistent ");
                    DBG_PRINT(ERR_DBG, "failed for TxDL\n");
                    return -ENOMEM;
                }
            }
            while (k < lst_per_page) {
                int l = (j * lst_per_page) + k;
                if (l == config->tx_cfg[i].fifo_len)
                    break;
                mac_control->fifos[i].list_info[l].list_virt_addr =
                    tmp_v + (k * lst_size);
                mac_control->fifos[i].list_info[l].list_phy_addr =
                    tmp_p + (k * lst_size);
                k++;
            }
        }
    }
    nic->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
    if (!nic->ufo_in_band_v)
        return -ENOMEM;
    /* Allocation and initialization of RXDs in Rings */
    size = 0;
    for (i = 0; i < config->rx_ring_num; i++) {
        if (config->rx_cfg[i].num_rxd %
            (rxd_count[nic->rxd_mode] + 1)) {
            DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
            DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ", i);
            DBG_PRINT(ERR_DBG, "RxDs per Block\n");
            return FAILURE;
        }
        size += config->rx_cfg[i].num_rxd;
        mac_control->rings[i].block_count =
            config->rx_cfg[i].num_rxd /
            (rxd_count[nic->rxd_mode] + 1);
        mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
            mac_control->rings[i].block_count;
    }
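    /*
     * Note that pkt_cnt is num_rxd minus one descriptor per block: each
     * receive block donates one RxD's worth of space to the pointers
     * that chain it to the next block (set up further down).
     */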
    if (nic->rxd_mode == RXD_MODE_1)
        size = (size * (sizeof(struct RxD1)));
    else
        size = (size * (sizeof(struct RxD3)));
    for (i = 0; i < config->rx_ring_num; i++) {
        mac_control->rings[i].rx_curr_get_info.block_index = 0;
        mac_control->rings[i].rx_curr_get_info.offset = 0;
        mac_control->rings[i].rx_curr_get_info.ring_len =
            config->rx_cfg[i].num_rxd - 1;
        mac_control->rings[i].rx_curr_put_info.block_index = 0;
        mac_control->rings[i].rx_curr_put_info.offset = 0;
        mac_control->rings[i].rx_curr_put_info.ring_len =
            config->rx_cfg[i].num_rxd - 1;
        mac_control->rings[i].nic = nic;
        mac_control->rings[i].ring_no = i;

        blk_cnt = config->rx_cfg[i].num_rxd /
            (rxd_count[nic->rxd_mode] + 1);
        /* Allocating all the Rx blocks */
        for (j = 0; j < blk_cnt; j++) {
            struct rx_block_info *rx_blocks;
            int l;

            rx_blocks = &mac_control->rings[i].rx_blocks[j];
            size = SIZE_OF_BLOCK;	/* size is always page size */
            tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
                                              &tmp_p_addr);
            if (tmp_v_addr == NULL) {
                /*
                 * In case of failure, free_shared_mem()
                 * is called, which should free any
                 * memory that was allocated till the
                 * failure happened.
                 */
                rx_blocks->block_virt_addr = tmp_v_addr;
                return -ENOMEM;
            }
            memset(tmp_v_addr, 0, size);
            rx_blocks->block_virt_addr = tmp_v_addr;
            rx_blocks->block_dma_addr = tmp_p_addr;
            rx_blocks->rxds = kmalloc(sizeof(struct rxd_info) *
                                      rxd_count[nic->rxd_mode],
                                      GFP_KERNEL);
            if (!rx_blocks->rxds)
                return -ENOMEM;
            for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
                rx_blocks->rxds[l].virt_addr =
                    rx_blocks->block_virt_addr +
                    (rxd_size[nic->rxd_mode] * l);
                rx_blocks->rxds[l].dma_addr =
                    rx_blocks->block_dma_addr +
                    (rxd_size[nic->rxd_mode] * l);
            }
        }
        /* Interlinking all Rx Blocks */
        for (j = 0; j < blk_cnt; j++) {
            tmp_v_addr =
                mac_control->rings[i].rx_blocks[j].block_virt_addr;
            tmp_v_addr_next =
                mac_control->rings[i].rx_blocks[(j + 1) %
                    blk_cnt].block_virt_addr;
            tmp_p_addr =
                mac_control->rings[i].rx_blocks[j].block_dma_addr;
            tmp_p_addr_next =
                mac_control->rings[i].rx_blocks[(j + 1) %
                    blk_cnt].block_dma_addr;

            pre_rxd_blk = (struct RxD_block *)tmp_v_addr;
            pre_rxd_blk->reserved_2_pNext_RxD_block =
                (unsigned long)tmp_v_addr_next;
            pre_rxd_blk->pNext_RxD_Blk_physical =
                (u64)tmp_p_addr_next;
        }
    }
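    /*
     * The (j + 1) % blk_cnt above makes the last block point back at
     * the first, so each ring's blocks form a circular list that the
     * NIC can follow indefinitely.
     */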
    if (nic->rxd_mode >= RXD_MODE_3A) {
        /*
         * Allocation of Storages for buffer addresses in 2BUFF mode
         * and the buffers as well.
         */
        for (i = 0; i < config->rx_ring_num; i++) {
            blk_cnt = config->rx_cfg[i].num_rxd /
                (rxd_count[nic->rxd_mode] + 1);
            mac_control->rings[i].ba =
                kmalloc((sizeof(struct buffAdd *) * blk_cnt),
                        GFP_KERNEL);
            if (!mac_control->rings[i].ba)
                return -ENOMEM;
            for (j = 0; j < blk_cnt; j++) {
                int k = 0;
                mac_control->rings[i].ba[j] =
                    kmalloc((sizeof(struct buffAdd) *
                             (rxd_count[nic->rxd_mode] + 1)),
                            GFP_KERNEL);
                if (!mac_control->rings[i].ba[j])
                    return -ENOMEM;
                while (k != rxd_count[nic->rxd_mode]) {
                    ba = &mac_control->rings[i].ba[j][k];

                    ba->ba_0_org = (void *)kmalloc
                        (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
                    if (!ba->ba_0_org)
                        return -ENOMEM;
                    tmp = (unsigned long)ba->ba_0_org;
                    tmp += ALIGN_SIZE;
                    tmp &= ~((unsigned long)ALIGN_SIZE);
                    ba->ba_0 = (void *)tmp;

                    ba->ba_1_org = (void *)kmalloc
                        (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
                    if (!ba->ba_1_org)
                        return -ENOMEM;
                    tmp = (unsigned long)ba->ba_1_org;
                    tmp += ALIGN_SIZE;
                    tmp &= ~((unsigned long)ALIGN_SIZE);
                    ba->ba_1 = (void *)tmp;
                    k++;
                }
            }
        }
    }
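    /*
     * The tmp += ALIGN_SIZE; tmp &= ~ALIGN_SIZE sequence above rounds
     * the raw kmalloc() pointer up to the next (ALIGN_SIZE + 1)-byte
     * boundary, assuming ALIGN_SIZE is one less than a power of two.
     * For example, with ALIGN_SIZE == 127 a pointer of 0x1008 becomes
     * 0x1080 (128-byte aligned), wasting at most ALIGN_SIZE bytes of
     * the over-allocation.
     */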
    /* Allocation and initialization of Statistics block */
    size = sizeof(struct stat_block);
    mac_control->stats_mem = pci_alloc_consistent
        (nic->pdev, size, &mac_control->stats_mem_phy);

    if (!mac_control->stats_mem) {
        /*
         * In case of failure, free_shared_mem() is called, which
         * should free any memory that was allocated till the
         * failure happened.
         */
        return -ENOMEM;
    }
    mac_control->stats_mem_sz = size;

    tmp_v_addr = mac_control->stats_mem;
    mac_control->stats_info = (struct stat_block *)tmp_v_addr;
    memset(tmp_v_addr, 0, size);
    DBG_PRINT(INIT_DBG, "%s: Ring Mem PHY: 0x%llx\n", dev->name,
              (unsigned long long)tmp_p_addr);

    return SUCCESS;
}
/**
 * free_shared_mem - Free the allocated Memory
 * @nic: Device private variable.
 * Description: This function is to free all memory locations allocated by
 * the init_shared_mem() function and return it to the kernel.
 */

static void free_shared_mem(struct s2io_nic *nic)
{
    int i, j, blk_cnt, size;
    void *tmp_v_addr;
    dma_addr_t tmp_p_addr;
    struct mac_info *mac_control;
    struct config_param *config;
    int lst_size, lst_per_page;
    struct net_device *dev = nic->dev;

    if (!nic)
        return;

    mac_control = &nic->mac_control;
    config = &nic->config;
    lst_size = (sizeof(struct TxD) * config->max_txds);
    lst_per_page = PAGE_SIZE / lst_size;

    for (i = 0; i < config->tx_fifo_num; i++) {
        int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
                                        lst_per_page);
        for (j = 0; j < page_num; j++) {
            int mem_blks = (j * lst_per_page);
            if (!mac_control->fifos[i].list_info)
                return;
            if (!mac_control->fifos[i].list_info[mem_blks].
                list_virt_addr)
                break;
            pci_free_consistent(nic->pdev, PAGE_SIZE,
                                mac_control->fifos[i].
                                list_info[mem_blks].list_virt_addr,
                                mac_control->fifos[i].
                                list_info[mem_blks].list_phy_addr);
        }
        /* If we got a zero DMA address during allocation,
         * free the page now.
         */
        if (mac_control->zerodma_virt_addr) {
            pci_free_consistent(nic->pdev, PAGE_SIZE,
                                mac_control->zerodma_virt_addr,
                                (dma_addr_t)0);
            DBG_PRINT(INIT_DBG,
                      "%s: Freeing TxDL with zero DMA addr. ",
                      dev->name);
            DBG_PRINT(INIT_DBG, "Virtual address %p\n",
                      mac_control->zerodma_virt_addr);
        }
        kfree(mac_control->fifos[i].list_info);
    }
    size = SIZE_OF_BLOCK;
    for (i = 0; i < config->rx_ring_num; i++) {
        blk_cnt = mac_control->rings[i].block_count;
        for (j = 0; j < blk_cnt; j++) {
            tmp_v_addr = mac_control->rings[i].rx_blocks[j].
                block_virt_addr;
            tmp_p_addr = mac_control->rings[i].rx_blocks[j].
                block_dma_addr;
            if (tmp_v_addr == NULL)
                break;
            pci_free_consistent(nic->pdev, size,
                                tmp_v_addr, tmp_p_addr);
            kfree(mac_control->rings[i].rx_blocks[j].rxds);
        }
    }
    if (nic->rxd_mode >= RXD_MODE_3A) {
        /* Freeing buffer storage addresses in 2BUFF mode. */
        for (i = 0; i < config->rx_ring_num; i++) {
            blk_cnt = config->rx_cfg[i].num_rxd /
                (rxd_count[nic->rxd_mode] + 1);
            for (j = 0; j < blk_cnt; j++) {
                int k = 0;
                if (!mac_control->rings[i].ba[j])
                    continue;
                while (k != rxd_count[nic->rxd_mode]) {
                    struct buffAdd *ba =
                        &mac_control->rings[i].ba[j][k];
                    kfree(ba->ba_0_org);
                    kfree(ba->ba_1_org);
                    k++;
                }
                kfree(mac_control->rings[i].ba[j]);
            }
            kfree(mac_control->rings[i].ba);
        }
    }
    if (mac_control->stats_mem) {
        pci_free_consistent(nic->pdev,
                            mac_control->stats_mem_sz,
                            mac_control->stats_mem,
                            mac_control->stats_mem_phy);
    }
    if (nic->ufo_in_band_v)
        kfree(nic->ufo_in_band_v);
}
/**
 * s2io_verify_pci_mode - Verify the PCI/PCI-X mode the adapter is in.
 */

static int s2io_verify_pci_mode(struct s2io_nic *nic)
{
    struct XENA_dev_config __iomem *bar0 = nic->bar0;
    register u64 val64 = 0;
    int mode;

    val64 = readq(&bar0->pci_mode);
    mode = (u8)GET_PCI_MODE(val64);

    if (val64 & PCI_MODE_UNKNOWN_MODE)
        return -1;	/* Unknown PCI mode */
    return mode;
}
#define NEC_VENID 0x1033
#define NEC_DEVID 0x0125
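/*
 * Check whether the adapter sits behind an NEC PCI-X bridge: walk every
 * PCI device looking for the NEC vendor/device ID, and report 1 when
 * such a bridge device lives on this adapter's parent bus.
 */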
static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
{
    struct pci_dev *tdev = NULL;
    while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
        if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
            if (tdev->bus == s2io_pdev->bus->parent) {
                pci_dev_put(tdev);
                return 1;
            }
        }
    }
    return 0;
}
static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
/**
 * s2io_print_pci_mode - Print the bus width and speed the adapter is on.
 */
static int s2io_print_pci_mode(struct s2io_nic *nic)
{
    struct XENA_dev_config __iomem *bar0 = nic->bar0;
    register u64 val64 = 0;
    int mode;
    struct config_param *config = &nic->config;

    val64 = readq(&bar0->pci_mode);
    mode = (u8)GET_PCI_MODE(val64);

    if (val64 & PCI_MODE_UNKNOWN_MODE)
        return -1;	/* Unknown PCI mode */

    config->bus_speed = bus_speed[mode];

    if (s2io_on_nec_bridge(nic->pdev)) {
        DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
                  nic->dev->name);
        return mode;
    }

    if (val64 & PCI_MODE_32_BITS) {
        DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
    } else {
        DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
    }
    switch (mode) {
    case PCI_MODE_PCI_33:
        DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
        break;
    case PCI_MODE_PCI_66:
        DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
        break;
    case PCI_MODE_PCIX_M1_66:
        DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
        break;
    case PCI_MODE_PCIX_M1_100:
        DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
        break;
    case PCI_MODE_PCIX_M1_133:
        DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
        break;
    case PCI_MODE_PCIX_M2_66:
        DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
        break;
    case PCI_MODE_PCIX_M2_100:
        DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
        break;
    case PCI_MODE_PCIX_M2_133:
        DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
        break;
    default:
        return -1;	/* Unsupported bus speed */
    }

    return mode;
}
/**
 * init_nic - Initialization of hardware
 * @nic: device private variable
 * Description: The function sequentially configures every block
 * of the H/W from their reset values.
 * Return Value: SUCCESS on success and
 * '-1' on failure (endian settings incorrect).
 */

static int init_nic(struct s2io_nic *nic)
{
    struct XENA_dev_config __iomem *bar0 = nic->bar0;
    struct net_device *dev = nic->dev;
    register u64 val64 = 0;
    void __iomem *add;
    u32 time;
    int i, j;
    struct mac_info *mac_control;
    struct config_param *config;
    int dtx_cnt = 0;
    unsigned long long mem_share;
    int mem_size;

    mac_control = &nic->mac_control;
    config = &nic->config;
    /* to set the swapper control on the card */
    if (s2io_set_swapper(nic)) {
        DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
        return -1;
    }

    /*
     * Herc requires EOI to be removed from reset before XGXS, so..
     */
    if (nic->device_type & XFRAME_II_DEVICE) {
        val64 = 0xA500000000ULL;
        writeq(val64, &bar0->sw_reset);
        msleep(500);
        val64 = readq(&bar0->sw_reset);
    }

    /* Remove XGXS from reset state */
    val64 = 0;
    writeq(val64, &bar0->sw_reset);
    msleep(500);
    val64 = readq(&bar0->sw_reset);

    /* Enable Receiving broadcasts */
    add = &bar0->mac_cfg;
    val64 = readq(&bar0->mac_cfg);
    val64 |= MAC_RMAC_BCAST_ENABLE;
    writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
    writel((u32)val64, add);
    writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
    writel((u32)(val64 >> 32), (add + 4));
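    /*
     * mac_cfg is a key-protected 64-bit register: the RMAC_CFG_KEY
     * value has to be rewritten into rmac_cfg_key before each 32-bit
     * half is written, which is why the key write appears twice above.
     */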
    /* Read registers in all blocks */
    val64 = readq(&bar0->mac_int_mask);
    val64 = readq(&bar0->mc_int_mask);
    val64 = readq(&bar0->xgxs_int_mask);

    /* Set MTU */
    val64 = dev->mtu;
    writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
    if (nic->device_type & XFRAME_II_DEVICE) {
        while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
            SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
                              &bar0->dtx_control, UF);
            msleep(1);	/* Necessary!! */
            dtx_cnt++;
        }
    } else {
        while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
            SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
                              &bar0->dtx_control, UF);
            val64 = readq(&bar0->dtx_control);
            dtx_cnt++;
        }
    }
    /* Tx DMA Initialization */
    val64 = 0;
    writeq(val64, &bar0->tx_fifo_partition_0);
    writeq(val64, &bar0->tx_fifo_partition_1);
    writeq(val64, &bar0->tx_fifo_partition_2);
    writeq(val64, &bar0->tx_fifo_partition_3);

    for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
        val64 |=
            vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
                 13) | vBIT(config->tx_cfg[i].fifo_priority,
                            ((i * 32) + 5), 3);

        if (i == (config->tx_fifo_num - 1)) {
            if (i % 2 == 0)
                i++;
        }

        /* every two FIFOs share one partition register */
        switch (i) {
        case 1:
            writeq(val64, &bar0->tx_fifo_partition_0);
            val64 = 0;
            break;
        case 3:
            writeq(val64, &bar0->tx_fifo_partition_1);
            val64 = 0;
            break;
        case 5:
            writeq(val64, &bar0->tx_fifo_partition_2);
            val64 = 0;
            break;
        case 7:
            writeq(val64, &bar0->tx_fifo_partition_3);
            break;
        }
    }

    /*
     * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
     * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
     */
    if ((nic->device_type == XFRAME_I_DEVICE) &&
        (get_xena_rev_id(nic->pdev) < 4))
        writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);

    val64 = readq(&bar0->tx_fifo_partition_0);
    DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
              &bar0->tx_fifo_partition_0, (unsigned long long)val64);
    /*
     * Initialization of Tx_PA_CONFIG register to ignore packet
     * integrity checking.
     */
    val64 = readq(&bar0->tx_pa_cfg);
    val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
        TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
    writeq(val64, &bar0->tx_pa_cfg);
    /* Rx DMA initialization. */
    val64 = 0;
    for (i = 0; i < config->rx_ring_num; i++) {
        val64 |=
            vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
                 3);
    }
    writeq(val64, &bar0->rx_queue_priority);
    /*
     * Allocating equal share of memory to all the
     * configured Rings.
     */
    val64 = 0;
    if (nic->device_type & XFRAME_II_DEVICE)
        mem_size = 32;
    else
        mem_size = 64;

    for (i = 0; i < config->rx_ring_num; i++) {
        switch (i) {
        case 0:
            mem_share = (mem_size / config->rx_ring_num +
                         mem_size % config->rx_ring_num);
            val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
            continue;
        case 1:
            mem_share = (mem_size / config->rx_ring_num);
            val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
            continue;
        case 2:
            mem_share = (mem_size / config->rx_ring_num);
            val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
            continue;
        case 3:
            mem_share = (mem_size / config->rx_ring_num);
            val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
            continue;
        case 4:
            mem_share = (mem_size / config->rx_ring_num);
            val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
            continue;
        case 5:
            mem_share = (mem_size / config->rx_ring_num);
            val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
            continue;
        case 6:
            mem_share = (mem_size / config->rx_ring_num);
            val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
            continue;
        case 7:
            mem_share = (mem_size / config->rx_ring_num);
            val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
            continue;
        }
    }
    writeq(val64, &bar0->rx_queue_cfg);
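    /*
     * Ring 0 absorbs the remainder of the integer division above, so
     * the whole of mem_size is always handed out even when it does not
     * divide evenly by rx_ring_num.
     */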
    /*
     * Filling Tx round robin registers as per the number of FIFOs.
     * Each byte of the five tx_w_round_robin registers selects the FIFO
     * to be serviced in that time slot, so the 40 slots together define
     * the weighted service order of the configured FIFOs.
     */
    switch (config->tx_fifo_num) {
    case 1:
        val64 = 0x0000000000000000ULL;
        writeq(val64, &bar0->tx_w_round_robin_0);
        writeq(val64, &bar0->tx_w_round_robin_1);
        writeq(val64, &bar0->tx_w_round_robin_2);
        writeq(val64, &bar0->tx_w_round_robin_3);
        writeq(val64, &bar0->tx_w_round_robin_4);
        break;
    case 2:
        val64 = 0x0000010000010000ULL;
        writeq(val64, &bar0->tx_w_round_robin_0);
        val64 = 0x0100000100000100ULL;
        writeq(val64, &bar0->tx_w_round_robin_1);
        val64 = 0x0001000001000001ULL;
        writeq(val64, &bar0->tx_w_round_robin_2);
        val64 = 0x0000010000010000ULL;
        writeq(val64, &bar0->tx_w_round_robin_3);
        val64 = 0x0100000000000000ULL;
        writeq(val64, &bar0->tx_w_round_robin_4);
        break;
    case 3:
        val64 = 0x0001000102000001ULL;
        writeq(val64, &bar0->tx_w_round_robin_0);
        val64 = 0x0001020000010001ULL;
        writeq(val64, &bar0->tx_w_round_robin_1);
        val64 = 0x0200000100010200ULL;
        writeq(val64, &bar0->tx_w_round_robin_2);
        val64 = 0x0001000102000001ULL;
        writeq(val64, &bar0->tx_w_round_robin_3);
        val64 = 0x0001020000000000ULL;
        writeq(val64, &bar0->tx_w_round_robin_4);
        break;
    case 4:
        val64 = 0x0001020300010200ULL;
        writeq(val64, &bar0->tx_w_round_robin_0);
        val64 = 0x0100000102030001ULL;
        writeq(val64, &bar0->tx_w_round_robin_1);
        val64 = 0x0200010000010203ULL;
        writeq(val64, &bar0->tx_w_round_robin_2);
        val64 = 0x0001020001000001ULL;
        writeq(val64, &bar0->tx_w_round_robin_3);
        val64 = 0x0203000100000000ULL;
        writeq(val64, &bar0->tx_w_round_robin_4);
        break;
    case 5:
        val64 = 0x0001000203000102ULL;
        writeq(val64, &bar0->tx_w_round_robin_0);
        val64 = 0x0001020001030004ULL;
        writeq(val64, &bar0->tx_w_round_robin_1);
        val64 = 0x0001000203000102ULL;
        writeq(val64, &bar0->tx_w_round_robin_2);
        val64 = 0x0001020001030004ULL;
        writeq(val64, &bar0->tx_w_round_robin_3);
        val64 = 0x0001000000000000ULL;
        writeq(val64, &bar0->tx_w_round_robin_4);
        break;
    case 6:
        val64 = 0x0001020304000102ULL;
        writeq(val64, &bar0->tx_w_round_robin_0);
        val64 = 0x0304050001020001ULL;
        writeq(val64, &bar0->tx_w_round_robin_1);
        val64 = 0x0203000100000102ULL;
        writeq(val64, &bar0->tx_w_round_robin_2);
        val64 = 0x0304000102030405ULL;
        writeq(val64, &bar0->tx_w_round_robin_3);
        val64 = 0x0001000200000000ULL;
        writeq(val64, &bar0->tx_w_round_robin_4);
        break;
    case 7:
        val64 = 0x0001020001020300ULL;
        writeq(val64, &bar0->tx_w_round_robin_0);
        val64 = 0x0102030400010203ULL;
        writeq(val64, &bar0->tx_w_round_robin_1);
        val64 = 0x0405060001020001ULL;
        writeq(val64, &bar0->tx_w_round_robin_2);
        val64 = 0x0304050000010200ULL;
        writeq(val64, &bar0->tx_w_round_robin_3);
        val64 = 0x0102030000000000ULL;
        writeq(val64, &bar0->tx_w_round_robin_4);
        break;
    case 8:
        val64 = 0x0001020300040105ULL;
        writeq(val64, &bar0->tx_w_round_robin_0);
        val64 = 0x0200030106000204ULL;
        writeq(val64, &bar0->tx_w_round_robin_1);
        val64 = 0x0103000502010007ULL;
        writeq(val64, &bar0->tx_w_round_robin_2);
        val64 = 0x0304010002060500ULL;
        writeq(val64, &bar0->tx_w_round_robin_3);
        val64 = 0x0103020400000000ULL;
        writeq(val64, &bar0->tx_w_round_robin_4);
        break;
    }

    /* Enable all configured Tx FIFO partitions */
    val64 = readq(&bar0->tx_fifo_partition_0);
    val64 |= (TX_FIFO_PARTITION_EN);
    writeq(val64, &bar0->tx_fifo_partition_0);
    /* Filling the Rx round robin registers as per the
     * number of Rings and steering based on QoS, using the same
     * byte-per-slot encoding as the Tx round robin registers above.
     */
    switch (config->rx_ring_num) {
    case 1:
        val64 = 0x8080808080808080ULL;
        writeq(val64, &bar0->rts_qos_steering);
        break;
    case 2:
        val64 = 0x0000010000010000ULL;
        writeq(val64, &bar0->rx_w_round_robin_0);
        val64 = 0x0100000100000100ULL;
        writeq(val64, &bar0->rx_w_round_robin_1);
        val64 = 0x0001000001000001ULL;
        writeq(val64, &bar0->rx_w_round_robin_2);
        val64 = 0x0000010000010000ULL;
        writeq(val64, &bar0->rx_w_round_robin_3);
        val64 = 0x0100000000000000ULL;
        writeq(val64, &bar0->rx_w_round_robin_4);

        val64 = 0x8080808040404040ULL;
        writeq(val64, &bar0->rts_qos_steering);
        break;
    case 3:
        val64 = 0x0001000102000001ULL;
        writeq(val64, &bar0->rx_w_round_robin_0);
        val64 = 0x0001020000010001ULL;
        writeq(val64, &bar0->rx_w_round_robin_1);
        val64 = 0x0200000100010200ULL;
        writeq(val64, &bar0->rx_w_round_robin_2);
        val64 = 0x0001000102000001ULL;
        writeq(val64, &bar0->rx_w_round_robin_3);
        val64 = 0x0001020000000000ULL;
        writeq(val64, &bar0->rx_w_round_robin_4);

        val64 = 0x8080804040402020ULL;
        writeq(val64, &bar0->rts_qos_steering);
        break;
    case 4:
        val64 = 0x0001020300010200ULL;
        writeq(val64, &bar0->rx_w_round_robin_0);
        val64 = 0x0100000102030001ULL;
        writeq(val64, &bar0->rx_w_round_robin_1);
        val64 = 0x0200010000010203ULL;
        writeq(val64, &bar0->rx_w_round_robin_2);
        val64 = 0x0001020001000001ULL;
        writeq(val64, &bar0->rx_w_round_robin_3);
        val64 = 0x0203000100000000ULL;
        writeq(val64, &bar0->rx_w_round_robin_4);

        val64 = 0x8080404020201010ULL;
        writeq(val64, &bar0->rts_qos_steering);
        break;
    case 5:
        val64 = 0x0001000203000102ULL;
        writeq(val64, &bar0->rx_w_round_robin_0);
        val64 = 0x0001020001030004ULL;
        writeq(val64, &bar0->rx_w_round_robin_1);
        val64 = 0x0001000203000102ULL;
        writeq(val64, &bar0->rx_w_round_robin_2);
        val64 = 0x0001020001030004ULL;
        writeq(val64, &bar0->rx_w_round_robin_3);
        val64 = 0x0001000000000000ULL;
        writeq(val64, &bar0->rx_w_round_robin_4);

        val64 = 0x8080404020201008ULL;
        writeq(val64, &bar0->rts_qos_steering);
        break;
    case 6:
        val64 = 0x0001020304000102ULL;
        writeq(val64, &bar0->rx_w_round_robin_0);
        val64 = 0x0304050001020001ULL;
        writeq(val64, &bar0->rx_w_round_robin_1);
        val64 = 0x0203000100000102ULL;
        writeq(val64, &bar0->rx_w_round_robin_2);
        val64 = 0x0304000102030405ULL;
        writeq(val64, &bar0->rx_w_round_robin_3);
        val64 = 0x0001000200000000ULL;
        writeq(val64, &bar0->rx_w_round_robin_4);

        val64 = 0x8080404020100804ULL;
        writeq(val64, &bar0->rts_qos_steering);
        break;
    case 7:
        val64 = 0x0001020001020300ULL;
        writeq(val64, &bar0->rx_w_round_robin_0);
        val64 = 0x0102030400010203ULL;
        writeq(val64, &bar0->rx_w_round_robin_1);
        val64 = 0x0405060001020001ULL;
        writeq(val64, &bar0->rx_w_round_robin_2);
        val64 = 0x0304050000010200ULL;
        writeq(val64, &bar0->rx_w_round_robin_3);
        val64 = 0x0102030000000000ULL;
        writeq(val64, &bar0->rx_w_round_robin_4);

        val64 = 0x8080402010080402ULL;
        writeq(val64, &bar0->rts_qos_steering);
        break;
    case 8:
        val64 = 0x0001020300040105ULL;
        writeq(val64, &bar0->rx_w_round_robin_0);
        val64 = 0x0200030106000204ULL;
        writeq(val64, &bar0->rx_w_round_robin_1);
        val64 = 0x0103000502010007ULL;
        writeq(val64, &bar0->rx_w_round_robin_2);
        val64 = 0x0304010002060500ULL;
        writeq(val64, &bar0->rx_w_round_robin_3);
        val64 = 0x0103020400000000ULL;
        writeq(val64, &bar0->rx_w_round_robin_4);

        val64 = 0x8040201008040201ULL;
        writeq(val64, &bar0->rts_qos_steering);
        break;
    }
    val64 = 0;
    for (i = 0; i < 8; i++)
        writeq(val64, &bar0->rts_frm_len_n[i]);

    /* Set the default rts frame length for the rings configured */
    val64 = MAC_RTS_FRM_LEN_SET(dev->mtu + 22);
    for (i = 0; i < config->rx_ring_num; i++)
        writeq(val64, &bar0->rts_frm_len_n[i]);

    /* Set the frame length for the configured rings
     * desired by the user.
     */
    for (i = 0; i < config->rx_ring_num; i++) {
        /* If rts_frm_len[i] == 0 then it is assumed that the user has
         * not specified frame length steering.
         * If the user provides a frame length then program
         * the rts_frm_len register for those values, otherwise
         * leave it as it is.
         */
        if (rts_frm_len[i] != 0) {
            writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
                   &bar0->rts_frm_len_n[i]);
        }
    }
    /* Program statistics memory */
    writeq(mac_control->stats_mem_phy, &bar0->stat_addr);

    if (nic->device_type == XFRAME_II_DEVICE) {
        val64 = STAT_BC(0x320);
        writeq(val64, &bar0->stat_byte_cnt);
    }

    /*
     * Initializing the sampling rate for the device to calculate the
     * bandwidth utilization.
     */
    val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
        MAC_RX_LINK_UTIL_VAL(rmac_util_period);
    writeq(val64, &bar0->mac_link_util);
    /*
     * Initializing the Transmit and Receive Traffic Interrupt
     * Scheme.
     */
    /*
     * TTI Initialization. Default Tx timer gets us about
     * 250 interrupts per sec. Continuous interrupts are enabled
     * by default.
     */
    if (nic->device_type == XFRAME_II_DEVICE) {
        int count = (nic->config.bus_speed * 125) / 2;
        val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
    } else {
        val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
    }
    val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
        TTI_DATA1_MEM_TX_URNG_B(0x10) |
        TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
    if (use_continuous_tx_intrs)
        val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
    writeq(val64, &bar0->tti_data1_mem);

    val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
        TTI_DATA2_MEM_TX_UFC_B(0x20) |
        TTI_DATA2_MEM_TX_UFC_C(0x40) | TTI_DATA2_MEM_TX_UFC_D(0x80);
    writeq(val64, &bar0->tti_data2_mem);

    val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
    writeq(val64, &bar0->tti_command_mem);

    /*
     * Once the operation completes, the Strobe bit of the command
     * register will be reset. We poll for this particular condition.
     * We wait for a maximum of 500ms for the operation to complete;
     * if it's not complete by then we return an error.
     */
    time = 0;
    while (TRUE) {
        val64 = readq(&bar0->tti_command_mem);
        if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD))
            break;
        if (time > 10) {
            DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
                      dev->name);
            return -1;
        }
        msleep(50);
        time++;
    }
    if (nic->config.bimodal) {
        int k;
        for (k = 0; k < config->rx_ring_num; k++) {
            val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
            val64 |= TTI_CMD_MEM_OFFSET(0x38 + k);
            writeq(val64, &bar0->tti_command_mem);

            /*
             * Once the operation completes, the Strobe bit of the
             * command register will be reset. We poll for this
             * particular condition. We wait for a maximum of 500ms
             * for the operation to complete; if it's not complete
             * by then we return an error.
             */
            time = 0;
            while (TRUE) {
                val64 = readq(&bar0->tti_command_mem);
                if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD))
                    break;
                if (time > 10) {
                    DBG_PRINT(ERR_DBG,
                              "%s: TTI init Failed\n",
                              dev->name);
                    return -1;
                }
                time++;
                msleep(50);
            }
        }
    }
    /* RTI Initialization */
    if (nic->device_type == XFRAME_II_DEVICE) {
        /*
         * Programmed to generate approx. 500 interrupts per
         * second.
         */
        int count = (nic->config.bus_speed * 125) / 4;
        val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
    } else {
        val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
    }
    val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
        RTI_DATA1_MEM_RX_URNG_B(0x10) |
        RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;

    writeq(val64, &bar0->rti_data1_mem);

    val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
        RTI_DATA2_MEM_RX_UFC_B(0x2);
    if (nic->intr_type == MSI_X)
        val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) |
                  RTI_DATA2_MEM_RX_UFC_D(0x40));
    else
        val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) |
                  RTI_DATA2_MEM_RX_UFC_D(0x80));
    writeq(val64, &bar0->rti_data2_mem);
    for (i = 0; i < config->rx_ring_num; i++) {
        val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
            | RTI_CMD_MEM_OFFSET(i);
        writeq(val64, &bar0->rti_command_mem);

        /*
         * Once the operation completes, the Strobe bit of the
         * command register will be reset. We poll for this
         * particular condition. We wait for a maximum of 500ms
         * for the operation to complete; if it's not complete
         * by then we return an error.
         */
        time = 0;
        while (TRUE) {
            val64 = readq(&bar0->rti_command_mem);
            if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
                break;
            if (time > 10) {
                DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
                          dev->name);
                return -1;
            }
            time++;
            msleep(50);
        }
    }
    /*
     * Initializing proper values as Pause threshold into all
     * the 8 Queues on Rx side.
     */
    writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
    writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
    /* Disable RMAC PAD STRIPPING */
    add = &bar0->mac_cfg;
    val64 = readq(&bar0->mac_cfg);
    val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
    writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
    writel((u32)(val64), add);
    writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
    writel((u32)(val64 >> 32), (add + 4));
    val64 = readq(&bar0->mac_cfg);

    /* Enable FCS stripping by adapter */
    add = &bar0->mac_cfg;
    val64 = readq(&bar0->mac_cfg);
    val64 |= MAC_CFG_RMAC_STRIP_FCS;
    if (nic->device_type == XFRAME_II_DEVICE)
        writeq(val64, &bar0->mac_cfg);
    else {
        writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
        writel((u32)(val64), add);
        writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
        writel((u32)(val64 >> 32), (add + 4));
    }
    /*
     * Set the time value to be inserted in the pause frame
     * generated by Xena.
     */
    val64 = readq(&bar0->rmac_pause_cfg);
    val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
    val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
    writeq(val64, &bar0->rmac_pause_cfg);
    /*
     * Set the Threshold Limit for Generating the pause frame.
     * If the amount of data in any Queue exceeds the ratio of
     * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256, a
     * pause frame is generated.
     */
    val64 = 0;
    for (i = 0; i < 4; i++) {
        val64 |=
            (((u64)0xFF00 |
              nic->mac_control.mc_pause_threshold_q0q3)
             << (i * 2 * 8));
    }
    writeq(val64, &bar0->mc_pause_thresh_q0q3);

    val64 = 0;
    for (i = 0; i < 4; i++) {
        val64 |=
            (((u64)0xFF00 |
              nic->mac_control.mc_pause_threshold_q4q7)
             << (i * 2 * 8));
    }
    writeq(val64, &bar0->mc_pause_thresh_q4q7);
    /*
     * TxDMA will stop Read requests if the number of read splits has
     * exceeded the limit pointed to by shared_splits.
     */
    val64 = readq(&bar0->pic_control);
    val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
    writeq(val64, &bar0->pic_control);

    if (nic->config.bus_speed == 266) {
        writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
        writeq(0x0, &bar0->read_retry_delay);
        writeq(0x0, &bar0->write_retry_delay);
    }
    /*
     * Programming the Herc to split every write transaction
     * that does not start on an ADB to reduce disconnects.
     */
    if (nic->device_type == XFRAME_II_DEVICE) {
        val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
            MISC_LINK_STABILITY_PRD(3);
        writeq(val64, &bar0->misc_control);
        val64 = readq(&bar0->pic_control2);
        val64 &= ~(BIT(13) | BIT(14) | BIT(15));
        writeq(val64, &bar0->pic_control2);
    }
    if (strstr(nic->product_name, "CX4")) {
        val64 = TMAC_AVG_IPG(0x17);
        writeq(val64, &bar0->tmac_avg_ipg);
    }

    return SUCCESS;
}
#define LINK_UP_DOWN_INTERRUPT		1
#define MAC_RMAC_ERR_TIMER		2

static int s2io_link_fault_indication(struct s2io_nic *nic)
{
    if (nic->intr_type != INTA)
        return MAC_RMAC_ERR_TIMER;
    if (nic->device_type == XFRAME_II_DEVICE)
        return LINK_UP_DOWN_INTERRUPT;
    else
        return MAC_RMAC_ERR_TIMER;
}
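/*
 * Only a Herc (Xframe-II) adapter running with INTA interrupts reports
 * link faults through the GPIO link-state interrupt; in every other
 * combination the driver falls back to polling via the RMAC error
 * timer.
 */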
/**
 * en_dis_able_nic_intrs - Enable or Disable the interrupts
 * @nic: device private variable,
 * @mask: A mask indicating which Intr block must be modified and,
 * @flag: A flag indicating whether to enable or disable the Intrs.
 * Description: This function will either disable or enable the interrupts
 * depending on the flag argument. The mask argument can be used to
 * enable/disable any Intr block.
 * Return Value: NONE.
 */

static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
{
    struct XENA_dev_config __iomem *bar0 = nic->bar0;
    register u64 val64 = 0, temp64 = 0;
    /* Top level interrupt classification */
    /* PIC Interrupts */
    if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
        /* Enable PIC Intrs in the general intr mask register */
        val64 = TXPIC_INT_M;
        if (flag == ENABLE_INTRS) {
            temp64 = readq(&bar0->general_int_mask);
            temp64 &= ~((u64)val64);
            writeq(temp64, &bar0->general_int_mask);
            /*
             * If it is a Hercules adapter, enable GPIO; otherwise
             * disable all PCIX, Flash, MDIO, IIC and GPIO
             * interrupts for now.
             */
            if (s2io_link_fault_indication(nic) ==
                LINK_UP_DOWN_INTERRUPT) {
                temp64 = readq(&bar0->pic_int_mask);
                temp64 &= ~((u64)PIC_INT_GPIO);
                writeq(temp64, &bar0->pic_int_mask);
                temp64 = readq(&bar0->gpio_int_mask);
                temp64 &= ~((u64)GPIO_INT_MASK_LINK_UP);
                writeq(temp64, &bar0->gpio_int_mask);
            } else {
                writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
            }
            /*
             * No MSI Support is available presently, so TTI and
             * RTI interrupts are also disabled.
             */
        } else if (flag == DISABLE_INTRS) {
            /*
             * Disable PIC Intrs in the general
             * intr mask register.
             */
            writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
            temp64 = readq(&bar0->general_int_mask);
            val64 |= temp64;
            writeq(val64, &bar0->general_int_mask);
        }
    }
    /* MAC Interrupts */
    /* Enabling/Disabling MAC interrupts */
    if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
        val64 = TXMAC_INT_M | RXMAC_INT_M;
        if (flag == ENABLE_INTRS) {
            temp64 = readq(&bar0->general_int_mask);
            temp64 &= ~((u64)val64);
            writeq(temp64, &bar0->general_int_mask);
            /*
             * All MAC block error interrupts are disabled for now.
             */
        } else if (flag == DISABLE_INTRS) {
            /*
             * Disable MAC Intrs in the general intr mask register.
             */
            writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
            writeq(DISABLE_ALL_INTRS,
                   &bar0->mac_rmac_err_mask);

            temp64 = readq(&bar0->general_int_mask);
            val64 |= temp64;
            writeq(val64, &bar0->general_int_mask);
        }
    }
    /* Tx traffic interrupts */
    if (mask & TX_TRAFFIC_INTR) {
        val64 = TXTRAFFIC_INT_M;
        if (flag == ENABLE_INTRS) {
            temp64 = readq(&bar0->general_int_mask);
            temp64 &= ~((u64)val64);
            writeq(temp64, &bar0->general_int_mask);
            /*
             * Enable all the Tx side interrupts;
             * writing 0 enables all 64 Tx interrupt levels.
             */
            writeq(0x0, &bar0->tx_traffic_mask);
        } else if (flag == DISABLE_INTRS) {
            /*
             * Disable Tx Traffic Intrs in the general intr mask
             * register.
             */
            writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
            temp64 = readq(&bar0->general_int_mask);
            val64 |= temp64;
            writeq(val64, &bar0->general_int_mask);
        }
    }
    /* Rx traffic interrupts */
    if (mask & RX_TRAFFIC_INTR) {
        val64 = RXTRAFFIC_INT_M;
        if (flag == ENABLE_INTRS) {
            temp64 = readq(&bar0->general_int_mask);
            temp64 &= ~((u64)val64);
            writeq(temp64, &bar0->general_int_mask);
            /* writing 0 enables all 8 Rx interrupt levels */
            writeq(0x0, &bar0->rx_traffic_mask);
        } else if (flag == DISABLE_INTRS) {
            /*
             * Disable Rx Traffic Intrs in the general intr mask
             * register.
             */
            writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
            temp64 = readq(&bar0->general_int_mask);
            val64 |= temp64;
            writeq(val64, &bar0->general_int_mask);
        }
    }
}
/**
 * verify_pcc_quiescent - Checks for PCC quiescent state
 * Return: 1 if PCC is quiescent,
 *         0 if PCC is not quiescent.
 */
static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
{
    int ret = 0, herc;
    struct XENA_dev_config __iomem *bar0 = sp->bar0;
    u64 val64 = readq(&bar0->adapter_status);

    herc = (sp->device_type == XFRAME_II_DEVICE);

    if (flag == FALSE) {
        if ((!herc && (get_xena_rev_id(sp->pdev) >= 4)) || herc) {
            if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
                ret = 1;
        } else {
            if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
                ret = 1;
        }
    } else {
        if ((!herc && (get_xena_rev_id(sp->pdev) >= 4)) || herc) {
            if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
                 ADAPTER_STATUS_RMAC_PCC_IDLE))
                ret = 1;
        } else {
            if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
                 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
                ret = 1;
        }
    }

    return ret;
}
/**
 * verify_xena_quiescence - Checks whether the H/W is ready
 * Description: Returns whether the H/W is ready to go or not. Depending
 * on whether the adapter enable bit was written or not, the comparison
 * differs and the calling function passes the input argument flag to
 * indicate this.
 * Return: 1 if Xena is quiescent,
 *         0 if Xena is not quiescent.
 */

static int verify_xena_quiescence(struct s2io_nic *sp)
{
    int mode;
    struct XENA_dev_config __iomem *bar0 = sp->bar0;
    u64 val64 = readq(&bar0->adapter_status);
    mode = s2io_verify_pci_mode(sp);

    if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
        DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
        return 0;
    }
    if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
        DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
        return 0;
    }
    if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
        DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
        return 0;
    }
    if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
        DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
        return 0;
    }
    if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
        DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
        return 0;
    }
    if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
        DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
        return 0;
    }
    if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
        DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
        return 0;
    }
    if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
        DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
        return 0;
    }

    /*
     * In PCI 33 mode, the P_PLL is not used, and therefore,
     * the P_PLL_LOCK bit in the adapter_status register will
     * not be asserted.
     */
    if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
        sp->device_type == XFRAME_II_DEVICE && mode !=
        PCI_MODE_PCI_33) {
        DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
        return 0;
    }
    if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
          ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
        DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
        return 0;
    }
    return 1;
}
/**
 * fix_mac_address - Fix for MAC addr problem on Alpha platforms
 * @sp: Pointer to device specific structure
 * Description:
 * New procedure to clear MAC address reading problems on Alpha platforms.
 */

static void fix_mac_address(struct s2io_nic *sp)
{
    struct XENA_dev_config __iomem *bar0 = sp->bar0;
    u64 val64;
    int i = 0;

    while (fix_mac[i] != END_SIGN) {
        writeq(fix_mac[i++], &bar0->gpio_control);
        udelay(10);
        val64 = readq(&bar0->gpio_control);
    }
}
/**
 * start_nic - Turns the device on
 * @nic: device private variable.
 * Description:
 * This function actually turns the device on. Before this function is
 * called, all Registers are configured from their reset states
 * and shared memory is allocated but the NIC is still quiescent. On
 * calling this function, the device interrupts are cleared and the NIC is
 * literally switched on by writing into the adapter control register.
 * Return Value:
 * SUCCESS on success and -1 on failure.
 */

static int start_nic(struct s2io_nic *nic)
{
    struct XENA_dev_config __iomem *bar0 = nic->bar0;
    struct net_device *dev = nic->dev;
    register u64 val64 = 0;
    u16 subid, i;
    struct mac_info *mac_control;
    struct config_param *config;

    mac_control = &nic->mac_control;
    config = &nic->config;
    /* PRC Initialization and configuration */
    for (i = 0; i < config->rx_ring_num; i++) {
        writeq((u64)mac_control->rings[i].rx_blocks[0].block_dma_addr,
               &bar0->prc_rxd0_n[i]);

        val64 = readq(&bar0->prc_ctrl_n[i]);
        if (nic->config.bimodal)
            val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
        if (nic->rxd_mode == RXD_MODE_1)
            val64 |= PRC_CTRL_RC_ENABLED;
        else
            val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
        if (nic->device_type == XFRAME_II_DEVICE)
            val64 |= PRC_CTRL_GROUP_READS;
        val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
        val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
        writeq(val64, &bar0->prc_ctrl_n[i]);
    }
    if (nic->rxd_mode == RXD_MODE_3B) {
        /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
        val64 = readq(&bar0->rx_pa_cfg);
        val64 |= RX_PA_CFG_IGNORE_L2_ERR;
        writeq(val64, &bar0->rx_pa_cfg);
    }

    /*
     * Enabling MC-RLDRAM. After enabling the device, we timeout
     * for around 100ms, which is approximately the time required
     * for the device to be ready for operation.
     */
    val64 = readq(&bar0->mc_rldram_mrs);
    val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
    SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
    val64 = readq(&bar0->mc_rldram_mrs);

    msleep(100);	/* Delay by around 100 ms. */

    /* Enabling ECC Protection. */
    val64 = readq(&bar0->adapter_control);
    val64 &= ~ADAPTER_ECC_EN;
    writeq(val64, &bar0->adapter_control);

    /*
     * Clearing any possible Link state change interrupts that
     * could have popped up just before Enabling the card.
     */
    val64 = readq(&bar0->mac_rmac_err_reg);
    if (val64)
        writeq(val64, &bar0->mac_rmac_err_reg);
    /*
     * Verify if the device is ready to be enabled; if so, enable it.
     */
    val64 = readq(&bar0->adapter_status);
    if (!verify_xena_quiescence(nic)) {
        DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
        DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
                  (unsigned long long)val64);
        return FAILURE;
    }

    /*
     * With some switches, link might be already up at this point.
     * Because of this weird behavior, when we enable the laser,
     * we may not get link. We need to handle this. We cannot
     * figure out which switch is misbehaving. So we are forced to
     * make a global change.
     */

    /* Enabling Laser. */
    val64 = readq(&bar0->adapter_control);
    val64 |= ADAPTER_EOI_TX_ON;
    writeq(val64, &bar0->adapter_control);

    if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
        /*
         * Don't see link state interrupts initially on some switches,
         * so directly scheduling the link state task here.
         */
        schedule_work(&nic->set_link_task);
    }
    /* SXE-002: Initialize link and activity LED */
    subid = nic->pdev->subsystem_device;
    if (((subid & 0xFF) >= 0x07) &&
        (nic->device_type == XFRAME_I_DEVICE)) {
        val64 = readq(&bar0->gpio_control);
        val64 |= 0x0000800000000000ULL;
        writeq(val64, &bar0->gpio_control);
        val64 = 0x0411040400000000ULL;
        writeq(val64, (void __iomem *)bar0 + 0x2700);
    }

    return SUCCESS;
}
/**
 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
 */
static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
                                        struct TxD *txdlp, int get_off)
{
    struct s2io_nic *nic = fifo_data->nic;
    struct sk_buff *skb;
    struct TxD *txds;
    u16 j, frg_cnt;

    txds = txdlp;
    if (txds->Host_Control == (u64)(long)nic->ufo_in_band_v) {
        pci_unmap_single(nic->pdev, (dma_addr_t)
                         txds->Buffer_Pointer, sizeof(u64),
                         PCI_DMA_TODEVICE);
        txds++;
    }

    skb = (struct sk_buff *)((unsigned long)
                             txds->Host_Control);
    if (!skb) {
        memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
        return NULL;
    }
    pci_unmap_single(nic->pdev, (dma_addr_t)
                     txds->Buffer_Pointer,
                     skb->len - skb->data_len,
                     PCI_DMA_TODEVICE);
    frg_cnt = skb_shinfo(skb)->nr_frags;
    if (frg_cnt) {
        txds++;
        for (j = 0; j < frg_cnt; j++, txds++) {
            skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
            if (!txds->Buffer_Pointer)
                break;
            pci_unmap_page(nic->pdev, (dma_addr_t)
                           txds->Buffer_Pointer,
                           frag->size, PCI_DMA_TODEVICE);
        }
    }
    memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
    return skb;
}
/**
 * free_tx_buffers - Free all queued Tx buffers
 * @nic: device private variable.
 * Description:
 * Free all queued Tx buffers.
 * Return Value: void
 */

static void free_tx_buffers(struct s2io_nic *nic)
{
    struct net_device *dev = nic->dev;
    struct sk_buff *skb;
    struct TxD *txdp;
    int i, j;
    struct mac_info *mac_control;
    struct config_param *config;
    int cnt = 0;

    mac_control = &nic->mac_control;
    config = &nic->config;

    for (i = 0; i < config->tx_fifo_num; i++) {
        for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
            txdp = (struct TxD *)mac_control->fifos[i].list_info[j].
                list_virt_addr;
            skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
            if (skb) {
                dev_kfree_skb(skb);
                cnt++;
            }
        }
        DBG_PRINT(INTR_DBG,
                  "%s: forcibly freeing %d skbs on FIFO%d\n",
                  dev->name, cnt, i);
        mac_control->fifos[i].tx_curr_get_info.offset = 0;
        mac_control->fifos[i].tx_curr_put_info.offset = 0;
    }
}
/**
 * stop_nic - To stop the nic
 * @nic: device private variable.
 * Description:
 * This function does exactly the opposite of what the start_nic()
 * function does. This function is called to stop the device.
 * Return Value:
 * void.
 */

static void stop_nic(struct s2io_nic *nic)
{
    struct XENA_dev_config __iomem *bar0 = nic->bar0;
    register u64 val64 = 0;
    u16 interruptible;
    struct mac_info *mac_control;
    struct config_param *config;

    mac_control = &nic->mac_control;
    config = &nic->config;

    /* Disable all interrupts */
    interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
    interruptible |= TX_PIC_INTR | RX_PIC_INTR;
    interruptible |= TX_MAC_INTR | RX_MAC_INTR;
    en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);

    /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
    val64 = readq(&bar0->adapter_control);
    val64 &= ~(ADAPTER_CNTL_EN);
    writeq(val64, &bar0->adapter_control);
}
static int fill_rxd_3buf(struct s2io_nic *nic, struct RxD_t *rxdp,
			 struct sk_buff *skb)
{
	struct net_device *dev = nic->dev;
	struct sk_buff *frag_list;
	void *tmp;

	/* Buffer-1 receives L3/L4 headers */
	((struct RxD3*)rxdp)->Buffer1_ptr = pci_map_single
			(nic->pdev, skb->data, l3l4hdr_size + 4,
			PCI_DMA_FROMDEVICE);

	/* skb_shinfo(skb)->frag_list will have L4 data payload */
	skb_shinfo(skb)->frag_list = dev_alloc_skb(dev->mtu + ALIGN_SIZE);
	if (skb_shinfo(skb)->frag_list == NULL) {
		DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n", dev->name);
		return -ENOMEM;
	}
	frag_list = skb_shinfo(skb)->frag_list;
	skb->truesize += frag_list->truesize;
	frag_list->next = NULL;
	tmp = (void *)ALIGN((long)frag_list->data, ALIGN_SIZE + 1);
	frag_list->data = tmp;
	frag_list->tail = tmp;

	/* Buffer-2 receives L4 data payload */
	((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev,
				frag_list->data, dev->mtu,
				PCI_DMA_FROMDEVICE);
	rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
	rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);

	return SUCCESS;
}
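/*
 * For reference, the 3-buffer RxD layout that results from the code
 * above (a descriptive sketch only; Buffer0 is mapped by the caller,
 * fill_rx_buffers(), to the 128-byte-aligned ba->ba_0 area):
 *
 *   Buffer0 -> ba->ba_0         (BUF0_LEN bytes)
 *   Buffer1 -> skb->data        (l3l4hdr_size + 4 bytes, L3/L4 headers)
 *   Buffer2 -> frag_list->data  (dev->mtu bytes, L4 payload)
 */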
/**
 *  fill_rx_buffers - Allocates the Rx side skbs
 *  @nic: device private variable
 *  @ring_no: ring number
 *  Description:
 *  The function allocates Rx side skbs and puts the physical
 *  address of these buffers into the RxD buffer pointers, so that the NIC
 *  can DMA the received frame into these locations.
 *  The NIC supports 3 receive modes, viz
 *  1. single buffer,
 *  2. three buffer and
 *  3. Five buffer modes.
 *  Each mode defines how many fragments the received frame will be split
 *  up into by the NIC. The frame is split into L3 header, L4 Header,
 *  L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
 *  is split into 3 fragments. As of now only single buffer mode is
 *  supported.
 *  Return Value:
 *  SUCCESS on success or an appropriate -ve value on failure.
 */
static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
{
	struct net_device *dev = nic->dev;
	struct sk_buff *skb;
	struct RxD_t *rxdp;
	int off, off1, size, block_no, block_no1;
	u32 alloc_tab = 0;
	u32 alloc_cnt;
	struct mac_info *mac_control;
	struct config_param *config;
	u64 tmp;
	struct buffAdd *ba;
	unsigned long flags;
	struct RxD_t *first_rxdp = NULL;

	mac_control = &nic->mac_control;
	config = &nic->config;
	alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
	    atomic_read(&nic->rx_bufs_left[ring_no]);

	block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
	off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
	while (alloc_tab < alloc_cnt) {
		block_no = mac_control->rings[ring_no].rx_curr_put_info.
		    block_index;
		off = mac_control->rings[ring_no].rx_curr_put_info.offset;

		rxdp = mac_control->rings[ring_no].
				rx_blocks[block_no].rxds[off].virt_addr;

		if ((block_no == block_no1) && (off == off1) &&
					(rxdp->Host_Control)) {
			DBG_PRINT(INTR_DBG, "%s: Get and Put",
				  dev->name);
			DBG_PRINT(INTR_DBG, " info equated\n");
			goto end;
		}
		if (off && (off == rxd_count[nic->rxd_mode])) {
			mac_control->rings[ring_no].rx_curr_put_info.
			    block_index++;
			if (mac_control->rings[ring_no].rx_curr_put_info.
			    block_index == mac_control->rings[ring_no].
					block_count)
				mac_control->rings[ring_no].rx_curr_put_info.
					block_index = 0;
			block_no = mac_control->rings[ring_no].
					rx_curr_put_info.block_index;
			if (off == rxd_count[nic->rxd_mode])
				off = 0;
			mac_control->rings[ring_no].rx_curr_put_info.
				offset = off;
			rxdp = mac_control->rings[ring_no].
				rx_blocks[block_no].block_virt_addr;
			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
				  dev->name, rxdp);
		}
		if (!napi) {
			spin_lock_irqsave(&nic->put_lock, flags);
			mac_control->rings[ring_no].put_pos =
			    (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
			spin_unlock_irqrestore(&nic->put_lock, flags);
		} else {
			mac_control->rings[ring_no].put_pos =
			    (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
		}
		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
			((nic->rxd_mode >= RXD_MODE_3A) &&
				(rxdp->Control_2 & BIT(0)))) {
			mac_control->rings[ring_no].rx_curr_put_info.
					offset = off;
			goto end;
		}
		/* calculate size of skb based on ring mode */
		size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
				HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
		if (nic->rxd_mode == RXD_MODE_1)
			size += NET_IP_ALIGN;
		else if (nic->rxd_mode == RXD_MODE_3B)
			size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
		else
			size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;

		/* allocate skb */
		skb = dev_alloc_skb(size);
		if (!skb) {
			DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
			DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			return -ENOMEM;
		}
		if (nic->rxd_mode == RXD_MODE_1) {
			/* 1 buffer mode - normal operation mode */
			memset(rxdp, 0, sizeof(struct RxD1));
			skb_reserve(skb, NET_IP_ALIGN);
			((struct RxD1*)rxdp)->Buffer0_ptr = pci_map_single
			    (nic->pdev, skb->data, size - NET_IP_ALIGN,
				PCI_DMA_FROMDEVICE);
			rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);

		} else if (nic->rxd_mode >= RXD_MODE_3A) {
			/*
			 * 2 or 3 buffer mode -
			 * Both 2 buffer mode and 3 buffer mode provides 128
			 * byte aligned receive buffers.
			 *
			 * 3 buffer mode provides header separation where in
			 * skb->data will have L3/L4 headers where as
			 * skb_shinfo(skb)->frag_list will have the L4 data
			 * payload
			 */
			memset(rxdp, 0, sizeof(struct RxD3));
			ba = &mac_control->rings[ring_no].ba[block_no][off];
			skb_reserve(skb, BUF0_LEN);
			tmp = (u64)(unsigned long) skb->data;
			tmp += ALIGN_SIZE;
			tmp &= ~ALIGN_SIZE;
			skb->data = (void *) (unsigned long)tmp;
			skb->tail = (void *) (unsigned long)tmp;

			if (!(((struct RxD3*)rxdp)->Buffer0_ptr))
				((struct RxD3*)rxdp)->Buffer0_ptr =
				   pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
						  PCI_DMA_FROMDEVICE);
			else
				pci_dma_sync_single_for_device(nic->pdev,
				    (dma_addr_t) ((struct RxD3*)rxdp)->Buffer0_ptr,
				    BUF0_LEN, PCI_DMA_FROMDEVICE);
			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
			if (nic->rxd_mode == RXD_MODE_3B) {
				/* Two buffer mode */

				/*
				 * Buffer2 will have L3/L4 header plus
				 * L4 payload
				 */
				((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single
				(nic->pdev, skb->data, dev->mtu + 4,
						PCI_DMA_FROMDEVICE);

				/* Buffer-1 will be dummy buffer. Not used */
				if (!(((struct RxD3*)rxdp)->Buffer1_ptr)) {
					((struct RxD3*)rxdp)->Buffer1_ptr =
						pci_map_single(nic->pdev,
						ba->ba_1, BUF1_LEN,
						PCI_DMA_FROMDEVICE);
				}
				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
								(dev->mtu + 4);
			} else {
				/* 3 buffer mode */
				if (fill_rxd_3buf(nic, rxdp, skb) == -ENOMEM) {
					dev_kfree_skb_irq(skb);
					if (first_rxdp) {
						wmb();
						first_rxdp->Control_1 |=
							RXD_OWN_XENA;
					}
					return -ENOMEM;
				}
			}
			rxdp->Control_2 |= BIT(0);
		}
		rxdp->Host_Control = (unsigned long) (skb);
		if (alloc_tab & ((1 << rxsync_frequency) - 1))
			rxdp->Control_1 |= RXD_OWN_XENA;
		off++;
		if (off == (rxd_count[nic->rxd_mode] + 1))
			off = 0;
		mac_control->rings[ring_no].rx_curr_put_info.offset = off;

		rxdp->Control_2 |= SET_RXD_MARKER;
		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			first_rxdp = rxdp;
		}
		atomic_inc(&nic->rx_bufs_left[ring_no]);
		alloc_tab++;
	}

      end:
	/* Transfer ownership of first descriptor to adapter just before
	 * exiting. Before that, use memory barrier so that ownership
	 * and other fields are seen by adapter correctly.
	 */
	if (first_rxdp) {
		wmb();
		first_rxdp->Control_1 |= RXD_OWN_XENA;
	}

	return SUCCESS;
}
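/*
 * Illustrative call pattern for fill_rx_buffers() (a sketch assuming
 * the device-open path; the real initialization also unwinds earlier
 * allocations on failure):
 */
#if 0
	for (i = 0; i < config->rx_ring_num; i++) {
		if (fill_rx_buffers(nic, i) == -ENOMEM) {
			DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
				  dev->name);
			return -ENOMEM;
		}
	}
#endif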
static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
{
	struct net_device *dev = sp->dev;
	int j;
	struct sk_buff *skb;
	struct RxD_t *rxdp;
	struct mac_info *mac_control;
	struct buffAdd *ba;

	mac_control = &sp->mac_control;
	for (j = 0; j < rxd_count[sp->rxd_mode]; j++) {
		rxdp = mac_control->rings[ring_no].
				rx_blocks[blk].rxds[j].virt_addr;
		skb = (struct sk_buff *)
			((unsigned long) rxdp->Host_Control);
		if (!skb)
			continue;
		if (sp->rxd_mode == RXD_MODE_1) {
			pci_unmap_single(sp->pdev, (dma_addr_t)
				 ((struct RxD1*)rxdp)->Buffer0_ptr,
				 dev->mtu +
				 HEADER_ETHERNET_II_802_3_SIZE
				 + HEADER_802_2_SIZE +
				 HEADER_SNAP_SIZE,
				 PCI_DMA_FROMDEVICE);
			memset(rxdp, 0, sizeof(struct RxD1));
		} else if (sp->rxd_mode == RXD_MODE_3B) {
			ba = &mac_control->rings[ring_no].
				ba[blk][j];
			pci_unmap_single(sp->pdev, (dma_addr_t)
				 ((struct RxD3*)rxdp)->Buffer0_ptr,
				 BUF0_LEN,
				 PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev, (dma_addr_t)
				 ((struct RxD3*)rxdp)->Buffer1_ptr,
				 BUF1_LEN,
				 PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev, (dma_addr_t)
				 ((struct RxD3*)rxdp)->Buffer2_ptr,
				 dev->mtu + 4,
				 PCI_DMA_FROMDEVICE);
			memset(rxdp, 0, sizeof(struct RxD3));
		} else {
			pci_unmap_single(sp->pdev, (dma_addr_t)
				((struct RxD3*)rxdp)->Buffer0_ptr, BUF0_LEN,
				PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev, (dma_addr_t)
				((struct RxD3*)rxdp)->Buffer1_ptr,
				l3l4hdr_size + 4,
				PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev, (dma_addr_t)
				((struct RxD3*)rxdp)->Buffer2_ptr, dev->mtu,
				PCI_DMA_FROMDEVICE);
			memset(rxdp, 0, sizeof(struct RxD3));
		}
		dev_kfree_skb(skb);
		atomic_dec(&sp->rx_bufs_left[ring_no]);
	}
}
/**
 *  free_rx_buffers - Frees all Rx buffers
 *  @sp: device private variable.
 *  Description:
 *  This function will free all Rx buffers allocated by host.
 *  Return Value:
 *  NONE.
 */
static void free_rx_buffers(struct s2io_nic *sp)
{
	struct net_device *dev = sp->dev;
	int i, blk = 0, buf_cnt = 0;
	struct mac_info *mac_control;
	struct config_param *config;

	mac_control = &sp->mac_control;
	config = &sp->config;

	for (i = 0; i < config->rx_ring_num; i++) {
		for (blk = 0; blk < rx_ring_sz[i]; blk++)
			free_rxd_blk(sp, i, blk);

		mac_control->rings[i].rx_curr_put_info.block_index = 0;
		mac_control->rings[i].rx_curr_get_info.block_index = 0;
		mac_control->rings[i].rx_curr_put_info.offset = 0;
		mac_control->rings[i].rx_curr_get_info.offset = 0;
		atomic_set(&sp->rx_bufs_left[i], 0);
		DBG_PRINT(INIT_DBG, "%s: Freed 0x%x Rx Buffers on ring%d\n",
			  dev->name, buf_cnt, i);
	}
}
/**
 * s2io_poll - Rx interrupt handler for NAPI support
 * @dev : pointer to the device structure.
 * @budget : The number of packets that were budgeted to be processed
 * during one pass through the 'Poll' function.
 * Description:
 * Comes into picture only if NAPI support has been incorporated. It does
 * the same thing that rx_intr_handler does, but not in an interrupt
 * context. Also, it will process only a given number of packets.
 * Return value:
 * 0 on success and 1 if there are No Rx packets to be processed.
 */
static int s2io_poll(struct net_device *dev, int *budget)
{
	struct s2io_nic *nic = dev->priv;
	int pkt_cnt = 0, org_pkts_to_process;
	struct mac_info *mac_control;
	struct config_param *config;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	int i;

	atomic_inc(&nic->isr_cnt);
	mac_control = &nic->mac_control;
	config = &nic->config;

	nic->pkts_to_process = *budget;
	if (nic->pkts_to_process > dev->quota)
		nic->pkts_to_process = dev->quota;
	org_pkts_to_process = nic->pkts_to_process;

	writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
	readl(&bar0->rx_traffic_int);

	for (i = 0; i < config->rx_ring_num; i++) {
		rx_intr_handler(&mac_control->rings[i]);
		pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
		if (!nic->pkts_to_process) {
			/* Quota for the current iteration has been met */
			goto no_rx;
		}
	}
	if (!pkt_cnt)
		pkt_cnt = 1;

	dev->quota -= pkt_cnt;
	*budget -= pkt_cnt;
	netif_rx_complete(dev);

	for (i = 0; i < config->rx_ring_num; i++) {
		if (fill_rx_buffers(nic, i) == -ENOMEM) {
			DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
			DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
			break;
		}
	}
	/* Re enable the Rx interrupts. */
	writeq(0x0, &bar0->rx_traffic_mask);
	readl(&bar0->rx_traffic_mask);
	atomic_dec(&nic->isr_cnt);
	return 0;

no_rx:
	dev->quota -= pkt_cnt;
	*budget -= pkt_cnt;

	for (i = 0; i < config->rx_ring_num; i++) {
		if (fill_rx_buffers(nic, i) == -ENOMEM) {
			DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
			DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
			break;
		}
	}
	atomic_dec(&nic->isr_cnt);
	return 1;
}
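/*
 * Sketch: under the 2.6-era NAPI model assumed by s2io_poll() above,
 * the probe path hooks the handler up roughly as below (the weight
 * value shown is an assumption):
 */
#if 0
	dev->poll = s2io_poll;
	dev->weight = 32;
#endif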
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * s2io_netpoll - netpoll event handler entry point
 * @dev : pointer to the device structure.
 * Description:
 * This function will be called by upper layer to check for events on the
 * interface in situations where interrupts are disabled. It is used for
 * specific in-kernel networking tasks, such as remote consoles and kernel
 * debugging over the network (example netdump in RedHat).
 */
static void s2io_netpoll(struct net_device *dev)
{
	struct s2io_nic *nic = dev->priv;
	struct mac_info *mac_control;
	struct config_param *config;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
	int i;

	disable_irq(dev->irq);

	atomic_inc(&nic->isr_cnt);
	mac_control = &nic->mac_control;
	config = &nic->config;

	writeq(val64, &bar0->rx_traffic_int);
	writeq(val64, &bar0->tx_traffic_int);

	/* Free the transmitted skbs, or else netpoll will run out of
	 * skbs and applications such as netdump will eventually fail.
	 */
	for (i = 0; i < config->tx_fifo_num; i++)
		tx_intr_handler(&mac_control->fifos[i]);

	/* check for received packet and indicate up to network */
	for (i = 0; i < config->rx_ring_num; i++)
		rx_intr_handler(&mac_control->rings[i]);

	for (i = 0; i < config->rx_ring_num; i++) {
		if (fill_rx_buffers(nic, i) == -ENOMEM) {
			DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
			DBG_PRINT(ERR_DBG, " in Rx Netpoll!!\n");
			break;
		}
	}
	atomic_dec(&nic->isr_cnt);
	enable_irq(dev->irq);
}
#endif
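/*
 * Sketch (assumption): with CONFIG_NET_POLL_CONTROLLER set, the probe
 * path registers the handler above via:
 */
#if 0
	dev->poll_controller = s2io_netpoll;
#endif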
/**
 *  rx_intr_handler - Rx interrupt handler
 *  @nic: device private variable.
 *  Description:
 *  If the interrupt is because of a received frame or if the
 *  receive ring contains fresh, as yet unprocessed frames, this function is
 *  called. It picks out the RxD at which place the last Rx processing had
 *  stopped and sends the skb to the OSM's Rx handler and then increments
 *  the offset.
 *  Return Value:
 *  NONE.
 */
static void rx_intr_handler(struct ring_info *ring_data)
{
	struct s2io_nic *nic = ring_data->nic;
	struct net_device *dev = (struct net_device *) nic->dev;
	int get_block, put_block, put_offset;
	struct rx_curr_get_info get_info, put_info;
	struct RxD_t *rxdp;
	struct sk_buff *skb;
	int pkt_cnt = 0;
	int i;

	spin_lock(&nic->rx_lock);
	if (atomic_read(&nic->card_state) == CARD_DOWN) {
		DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n",
			  __FUNCTION__, dev->name);
		spin_unlock(&nic->rx_lock);
		return;
	}

	get_info = ring_data->rx_curr_get_info;
	get_block = get_info.block_index;
	memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
	put_block = put_info.block_index;
	rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
	if (!napi) {
		spin_lock(&nic->put_lock);
		put_offset = ring_data->put_pos;
		spin_unlock(&nic->put_lock);
	} else
		put_offset = ring_data->put_pos;

	while (RXD_IS_UP2DT(rxdp)) {
		/*
		 * If you are next to put index then it's
		 * FIFO full condition
		 */
		if ((get_block == put_block) &&
		    (get_info.offset + 1) == put_info.offset) {
			DBG_PRINT(INTR_DBG, "%s: Ring Full\n", dev->name);
			break;
		}
		skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
		if (skb == NULL) {
			DBG_PRINT(ERR_DBG, "%s: The skb is ",
				  dev->name);
			DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
			spin_unlock(&nic->rx_lock);
			return;
		}
		if (nic->rxd_mode == RXD_MODE_1) {
			pci_unmap_single(nic->pdev, (dma_addr_t)
				 ((struct RxD1*)rxdp)->Buffer0_ptr,
				 dev->mtu +
				 HEADER_ETHERNET_II_802_3_SIZE +
				 HEADER_802_2_SIZE +
				 HEADER_SNAP_SIZE,
				 PCI_DMA_FROMDEVICE);
		} else if (nic->rxd_mode == RXD_MODE_3B) {
			pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
				 ((struct RxD3*)rxdp)->Buffer0_ptr,
				 BUF0_LEN, PCI_DMA_FROMDEVICE);
			pci_unmap_single(nic->pdev, (dma_addr_t)
				 ((struct RxD3*)rxdp)->Buffer2_ptr,
				 dev->mtu + 4,
				 PCI_DMA_FROMDEVICE);
		} else {
			pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
				 ((struct RxD3*)rxdp)->Buffer0_ptr, BUF0_LEN,
				 PCI_DMA_FROMDEVICE);
			pci_unmap_single(nic->pdev, (dma_addr_t)
				 ((struct RxD3*)rxdp)->Buffer1_ptr,
				 l3l4hdr_size + 4,
				 PCI_DMA_FROMDEVICE);
			pci_unmap_single(nic->pdev, (dma_addr_t)
				 ((struct RxD3*)rxdp)->Buffer2_ptr,
				 dev->mtu, PCI_DMA_FROMDEVICE);
		}
		prefetch(skb->data);
		rx_osm_handler(ring_data, rxdp);
		get_info.offset++;
		ring_data->rx_curr_get_info.offset = get_info.offset;
		rxdp = ring_data->rx_blocks[get_block].
				rxds[get_info.offset].virt_addr;
		if (get_info.offset == rxd_count[nic->rxd_mode]) {
			get_info.offset = 0;
			ring_data->rx_curr_get_info.offset = get_info.offset;
			get_block++;
			if (get_block == ring_data->block_count)
				get_block = 0;
			ring_data->rx_curr_get_info.block_index = get_block;
			rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
		}

		nic->pkts_to_process -= 1;
		if ((napi) && (!nic->pkts_to_process))
			break;
		pkt_cnt++;
		if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
			break;
	}
	if (nic->lro) {
		/* Clear all LRO sessions before exiting */
		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
			struct lro *lro = &nic->lro0_n[i];
			if (lro->in_use) {
				update_L3L4_header(nic, lro);
				queue_rx_frame(lro->parent);
				clear_lro_session(lro);
			}
		}
	}

	spin_unlock(&nic->rx_lock);
}
/**
 *  tx_intr_handler - Transmit interrupt handler
 *  @nic : device private variable
 *  Description:
 *  If an interrupt was raised to indicate DMA complete of the
 *  Tx packet, this function is called. It identifies the last TxD
 *  whose buffer was freed and frees all skbs whose data have already
 *  DMA'ed into the NICs internal memory.
 *  Return Value:
 *  NONE
 */
static void tx_intr_handler(struct fifo_info *fifo_data)
{
	struct s2io_nic *nic = fifo_data->nic;
	struct net_device *dev = (struct net_device *) nic->dev;
	struct tx_curr_get_info get_info, put_info;
	struct sk_buff *skb;
	struct TxD *txdlp;

	get_info = fifo_data->tx_curr_get_info;
	memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
	txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
	    list_virt_addr;
	while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
	       (get_info.offset != put_info.offset) &&
	       (txdlp->Host_Control)) {
		/* Check for TxD errors */
		if (txdlp->Control_1 & TXD_T_CODE) {
			unsigned long long err;
			err = txdlp->Control_1 & TXD_T_CODE;
			if (err & 0x1)
				nic->mac_control.stats_info->sw_stat.
					parity_err_cnt++;
			if ((err >> 48) == 0xA) {
				DBG_PRINT(TX_DBG, "TxD returned due "
					  "to loss of link\n");
			} else {
				DBG_PRINT(ERR_DBG, "***TxD error %llx\n", err);
			}
		}

		skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
		if (skb == NULL) {
			DBG_PRINT(ERR_DBG, "%s: Null skb ",
				  __FUNCTION__);
			DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
			return;
		}

		/* Updating the statistics block */
		nic->stats.tx_bytes += skb->len;
		dev_kfree_skb_irq(skb);

		get_info.offset++;
		if (get_info.offset == get_info.fifo_len + 1)
			get_info.offset = 0;
		txdlp = (struct TxD *) fifo_data->list_info
		    [get_info.offset].list_virt_addr;
		fifo_data->tx_curr_get_info.offset =
		    get_info.offset;
	}

	spin_lock(&nic->tx_lock);
	if (netif_queue_stopped(dev))
		netif_wake_queue(dev);
	spin_unlock(&nic->tx_lock);
}
/**
 *  s2io_mdio_write - Function to write in to MDIO registers
 *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
 *  @addr     : address value
 *  @value    : data value
 *  @dev      : pointer to net_device structure
 *  Description:
 *  This function is used to write values to the MDIO registers.
 *  Return value:
 *  NONE
 */
static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value,
			    struct net_device *dev)
{
	u64 val64 = 0x0;
	struct s2io_nic *sp = dev->priv;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* address transaction */
	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
			| MDIO_MMD_DEV_ADDR(mmd_type)
			| MDIO_MMS_PRT_ADDR(0x0);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* data transaction */
	val64 = 0x0;
	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
			| MDIO_MMD_DEV_ADDR(mmd_type)
			| MDIO_MMS_PRT_ADDR(0x0)
			| MDIO_MDIO_DATA(value)
			| MDIO_OP(MDIO_OP_WRITE_TRANS);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* read transaction to verify the write */
	val64 = 0x0;
	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
			| MDIO_MMD_DEV_ADDR(mmd_type)
			| MDIO_MMS_PRT_ADDR(0x0)
			| MDIO_OP(MDIO_OP_READ_TRANS);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);
}
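/*
 * Example usage (illustrative only; mmd_addr and value are placeholder
 * names, not register definitions from this driver): write a 16-bit
 * value to a register in the PMA/PMD MMD.
 */
#if 0
	s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, mmd_addr, value, dev);
#endif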
/**
 *  s2io_mdio_read - Function to read from the MDIO registers
 *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
 *  @addr     : address value
 *  @dev      : pointer to net_device structure
 *  Description:
 *  This function is used to read values from the MDIO registers.
 *  Return value:
 *  The value read from the addressed MDIO register.
 */
static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
{
	u64 val64 = 0x0;
	u64 rval64 = 0x0;
	struct s2io_nic *sp = dev->priv;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* address transaction */
	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
			| MDIO_MMD_DEV_ADDR(mmd_type)
			| MDIO_MMS_PRT_ADDR(0x0);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* Data transaction */
	val64 = 0x0;
	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
			| MDIO_MMD_DEV_ADDR(mmd_type)
			| MDIO_MMS_PRT_ADDR(0x0)
			| MDIO_OP(MDIO_OP_READ_TRANS);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* Read the value from regs */
	rval64 = readq(&bar0->mdio_control);
	rval64 = rval64 & 0xFFFF0000;
	rval64 = rval64 >> 16;
	return rval64;
}
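/*
 * Example usage (sketch): probe the PMA device at address 0x0000,
 * which s2io_updt_xpak_counter() below expects to read back as 0x2040.
 */
#if 0
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, 0x0000, dev);
	if (val64 != 0x2040)
		DBG_PRINT(ERR_DBG, "Unexpected PMA value: %llx\n",
			  (unsigned long long)val64);
#endif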
/**
 *  s2io_chk_xpak_counter - Function to check the status of the xpak counters
 *  @counter   : counter value to be updated
 *  @regs_stat : accumulated per-counter 2-bit status word
 *  @index     : index of this counter within @regs_stat
 *  @flag      : flag to indicate the status
 *  @type      : counter type
 *  Description:
 *  This function is to check the status of the xpak counters value
 *  Return Value:
 *  NONE
 */
static void s2io_chk_xpak_counter(u64 *counter, u64 *regs_stat, u32 index,
				  u16 flag, u16 type)
{
	u64 mask = 0x3;
	u64 val64;
	int i;

	/* Each counter keeps a 2-bit saturating state in *regs_stat */
	for (i = 0; i < index; i++)
		mask = mask << 0x2;

	if (flag > 0) {
		*counter = *counter + 1;
		val64 = *regs_stat & mask;
		val64 = val64 >> (index * 0x2);
		val64 = val64 + 1;
		if (val64 == 3) {
			switch (type) {
			case 1:
				DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
					  "service. Excessive temperatures may "
					  "result in premature transceiver "
					  "failure\n");
				break;
			case 2:
				DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
					  "service. Excessive bias currents may "
					  "indicate imminent laser diode "
					  "failure\n");
				break;
			case 3:
				DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
					  "service. Excessive laser output "
					  "power may saturate far-end "
					  "receiver\n");
				break;
			default:
				DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
					  "type\n");
			}
			val64 = 0x0;
		}
		val64 = val64 << (index * 0x2);
		*regs_stat = (*regs_stat & (~mask)) | (val64);
	} else {
		*regs_stat = *regs_stat & (~mask);
	}
}
/**
 *  s2io_updt_xpak_counter - Function to update the xpak counters
 *  @dev : pointer to net_device struct
 *  Description:
 *  This function updates the status of the xpak counters.
 *  Return Value:
 *  NONE
 */
static void s2io_updt_xpak_counter(struct net_device *dev)
{
	u16 flag  = 0x0;
	u16 type  = 0x0;
	u16 val16 = 0x0;
	u64 val64 = 0x0;
	u64 addr  = 0x0;

	struct s2io_nic *sp = dev->priv;
	struct stat_block *stat_info = sp->mac_control.stats_info;

	/* Check the communication with the MDIO slave */
	addr = 0x0000;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
	if ((val64 == 0xFFFF) || (val64 == 0x0000)) {
		DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
			  "Returned %llx\n", (unsigned long long)val64);
		return;
	}

	/* Check for the expected value of 0x2040 at PMA address 0x0000 */
	if (val64 != 0x2040) {
		DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
		DBG_PRINT(ERR_DBG, "Returned: %llx - Expected: 0x2040\n",
			  (unsigned long long)val64);
		return;
	}

	/* Loading the DOM register to MDIO register */
	addr = 0xA100;
	s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	/* Reading the Alarm flags */
	addr = 0xA070;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	flag = CHECKBIT(val64, 0x7);
	type = 1;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
			      &stat_info->xpak_stat.xpak_regs_stat,
			      0x0, flag, type);

	if (CHECKBIT(val64, 0x6))
		stat_info->xpak_stat.alarm_transceiver_temp_low++;

	flag = CHECKBIT(val64, 0x3);
	type = 2;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
			      &stat_info->xpak_stat.xpak_regs_stat,
			      0x2, flag, type);

	if (CHECKBIT(val64, 0x2))
		stat_info->xpak_stat.alarm_laser_bias_current_low++;

	flag = CHECKBIT(val64, 0x1);
	type = 3;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
			      &stat_info->xpak_stat.xpak_regs_stat,
			      0x4, flag, type);

	if (CHECKBIT(val64, 0x0))
		stat_info->xpak_stat.alarm_laser_output_power_low++;

	/* Reading the Warning flags */
	addr = 0xA074;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	if (CHECKBIT(val64, 0x7))
		stat_info->xpak_stat.warn_transceiver_temp_high++;

	if (CHECKBIT(val64, 0x6))
		stat_info->xpak_stat.warn_transceiver_temp_low++;

	if (CHECKBIT(val64, 0x3))
		stat_info->xpak_stat.warn_laser_bias_current_high++;

	if (CHECKBIT(val64, 0x2))
		stat_info->xpak_stat.warn_laser_bias_current_low++;

	if (CHECKBIT(val64, 0x1))
		stat_info->xpak_stat.warn_laser_output_power_high++;

	if (CHECKBIT(val64, 0x0))
		stat_info->xpak_stat.warn_laser_output_power_low++;
}
/**
 *  alarm_intr_handler - Alarm Interrupt handler
 *  @nic: device private variable
 *  Description: If the interrupt was neither because of a received frame
 *  nor a Tx complete, this function is called. If the interrupt indicated
 *  a loss of link, the OSM link status handler is invoked; for any other
 *  alarm interrupt, the block that raised the interrupt is noted and a
 *  H/W reset is issued.
 *  Return Value:
 *  NONE
 */
static void alarm_intr_handler(struct s2io_nic *nic)
{
	struct net_device *dev = (struct net_device *) nic->dev;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0, err_reg = 0;
	u64 cnt;
	int i;

	if (atomic_read(&nic->card_state) == CARD_DOWN)
		return;
	nic->mac_control.stats_info->sw_stat.ring_full_cnt = 0;
	/* Handling the XPAK counters update */
	if (nic->mac_control.stats_info->xpak_stat.xpak_timer_count < 72000) {
		/* waiting for an hour */
		nic->mac_control.stats_info->xpak_stat.xpak_timer_count++;
	} else {
		s2io_updt_xpak_counter(dev);
		/* reset the count to zero */
		nic->mac_control.stats_info->xpak_stat.xpak_timer_count = 0;
	}

	/* Handling link status change error Intr */
	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		err_reg = readq(&bar0->mac_rmac_err_reg);
		writeq(err_reg, &bar0->mac_rmac_err_reg);
		if (err_reg & RMAC_LINK_STATE_CHANGE_INT)
			schedule_work(&nic->set_link_task);
	}

	/* Handling Ecc errors */
	val64 = readq(&bar0->mc_err_reg);
	writeq(val64, &bar0->mc_err_reg);
	if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
		if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
			nic->mac_control.stats_info->sw_stat.
				double_ecc_errs++;
			DBG_PRINT(INIT_DBG, "%s: Device indicates ",
				  dev->name);
			DBG_PRINT(INIT_DBG, "double ECC error!!\n");
			if (nic->device_type != XFRAME_II_DEVICE) {
				/* Reset XframeI only if critical error */
				if (val64 & (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
					     MC_ERR_REG_MIRI_ECC_DB_ERR_1)) {
					netif_stop_queue(dev);
					schedule_work(&nic->rst_timer_task);
					nic->mac_control.stats_info->sw_stat.
						soft_reset_cnt++;
				}
			}
		} else {
			nic->mac_control.stats_info->sw_stat.
				single_ecc_errs++;
		}
	}

	/* In case of a serious error, the device will be Reset. */
	val64 = readq(&bar0->serr_source);
	if (val64 & SERR_SOURCE_ANY) {
		nic->mac_control.stats_info->sw_stat.serious_err_cnt++;
		DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
		DBG_PRINT(ERR_DBG, "serious error %llx!!\n",
			  (unsigned long long)val64);
		netif_stop_queue(dev);
		schedule_work(&nic->rst_timer_task);
		nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
	}

	/*
	 * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
	 * Error occurs, the adapter will be recycled by disabling the
	 * adapter enable bit and enabling it again after the device
	 * becomes Quiescent.
	 */
	val64 = readq(&bar0->pcc_err_reg);
	writeq(val64, &bar0->pcc_err_reg);
	if (val64 & PCC_FB_ECC_DB_ERR) {
		u64 ac = readq(&bar0->adapter_control);
		ac &= ~(ADAPTER_CNTL_EN);
		writeq(ac, &bar0->adapter_control);
		ac = readq(&bar0->adapter_control);
		schedule_work(&nic->set_link_task);
	}
	/* Check for data parity error */
	val64 = readq(&bar0->pic_int_status);
	if (val64 & PIC_INT_GPIO) {
		val64 = readq(&bar0->gpio_int_reg);
		if (val64 & GPIO_INT_REG_DP_ERR_INT) {
			nic->mac_control.stats_info->sw_stat.parity_err_cnt++;
			schedule_work(&nic->rst_timer_task);
			nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
		}
	}

	/* Check for ring full counter */
	if (nic->device_type & XFRAME_II_DEVICE) {
		val64 = readq(&bar0->ring_bump_counter1);
		for (i = 0; i < 4; i++) {
			cnt = (val64 & vBIT(0xFFFF, (i*16), 16));
			cnt >>= 64 - ((i+1)*16);
			nic->mac_control.stats_info->sw_stat.ring_full_cnt
				+= cnt;
		}

		val64 = readq(&bar0->ring_bump_counter2);
		for (i = 0; i < 4; i++) {
			cnt = (val64 & vBIT(0xFFFF, (i*16), 16));
			cnt >>= 64 - ((i+1)*16);
			nic->mac_control.stats_info->sw_stat.ring_full_cnt
				+= cnt;
		}
	}

	/* Other types of interrupts are not being handled now, TODO */
}
/**
 *  wait_for_cmd_complete - waits for a command to complete.
 *  @addr : address of the register to poll.
 *  @busy_bit : bit which indicates the command is still executing.
 *  Description: Function that waits for a command written to the RMAC
 *  ADDR DATA registers to be completed and returns either success or
 *  error depending on whether the command completed or not.
 *  Return value:
 *  SUCCESS on success and FAILURE on failure.
 */
static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit)
{
	int ret = FAILURE, cnt = 0;
	u64 val64;

	while (TRUE) {
		val64 = readq(addr);
		if (!(val64 & busy_bit)) {
			ret = SUCCESS;
			break;
		}

		if (in_interrupt())
			mdelay(50);
		else
			msleep(50);

		if (cnt++ > 10)
			break;
	}
	return ret;
}
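/*
 * Example usage (a sketch based on the RMAC address programming path;
 * the register and busy bit named here are assumptions about that
 * path, not taken from this section):
 */
#if 0
	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
			RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING) == FAILURE)
		return FAILURE;
#endif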
/**
 * check_pci_device_id - Checks if the device id is supported
 * @id : device id
 * Description: Function to check if the pci device id is supported by driver.
 * Return value: Actual device id if supported else PCI_ANY_ID
 */
static u16 check_pci_device_id(u16 id)
{
	switch (id) {
	case PCI_DEVICE_ID_HERC_WIN:
	case PCI_DEVICE_ID_HERC_UNI:
		return XFRAME_II_DEVICE;
	case PCI_DEVICE_ID_S2IO_UNI:
	case PCI_DEVICE_ID_S2IO_WIN:
		return XFRAME_I_DEVICE;
	default:
		return PCI_ANY_ID;
	}
}
/**
 *  s2io_reset - Resets the card.
 *  @sp : private member of the device structure.
 *  Description: Function to Reset the card. This function then also
 *  restores the previously saved PCI configuration space registers as
 *  the card reset also resets the configuration space.
 *  Return value:
 *  void.
 */
static void s2io_reset(struct s2io_nic * sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	u16 subid, pci_cmd;
	int i;
	u16 val16;

	DBG_PRINT(INIT_DBG, "%s - Resetting XFrame card %s\n",
		  __FUNCTION__, sp->dev->name);

	/* Back up the PCI-X CMD reg, don't want to lose MMRBC, OST settings */
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));

	if (sp->device_type == XFRAME_II_DEVICE) {
		int ret;
		ret = pci_set_power_state(sp->pdev, 3);
		if (!ret)
			ret = pci_set_power_state(sp->pdev, 0);
		else {
			DBG_PRINT(ERR_DBG, "%s PME based SW_Reset failed!\n",
				  __FUNCTION__);
			goto old_way;
		}
		msleep(20);
		goto new_way;
	}
old_way:
	val64 = SW_RESET_ALL;
	writeq(val64, &bar0->sw_reset);
new_way:
	if (strstr(sp->product_name, "CX4"))
		msleep(750);
	msleep(250);
	for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {

		/* Restore the PCI state saved during initialization. */
		pci_restore_state(sp->pdev);
		pci_read_config_word(sp->pdev, 0x2, &val16);
		if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
			break;
		msleep(200);
	}

	if (check_pci_device_id(val16) == (u16)PCI_ANY_ID)
		DBG_PRINT(ERR_DBG, "%s SW_Reset failed!\n", __FUNCTION__);

	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);

	s2io_init_pci(sp);

	/* Set swapper to enable I/O register access */
	s2io_set_swapper(sp);

	/* Restore the MSIX table entries from local variables */
	restore_xmsi_data(sp);

	/* Clear certain PCI/PCI-X fields after reset */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Clear "detected parity error" bit */
		pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);

		/* Clearing PCIX Ecc status register */
		pci_write_config_dword(sp->pdev, 0x68, 0x7C);

		/* Clearing PCI_STATUS error reflected here */
		writeq(BIT(62), &bar0->txpic_int_reg);
	}

	/* Reset device statistics maintained by OS */
	memset(&sp->stats, 0, sizeof(struct net_device_stats));

	/* SXE-002: Configure link and activity LED to turn it off */
	subid = sp->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (sp->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	/*
	 * Clear spurious ECC interrupts that would have occurred on
	 * XFRAME II cards after reset.
	 */
	if (sp->device_type == XFRAME_II_DEVICE) {
		val64 = readq(&bar0->pcc_err_reg);
		writeq(val64, &bar0->pcc_err_reg);
	}

	sp->device_enabled_once = FALSE;
}
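/*
 * Note: the pci_restore_state() loop in s2io_reset() assumes the
 * configuration space was captured earlier with pci_save_state()
 * during device initialization (the exact call site is an assumption):
 */
#if 0
	pci_save_state(sp->pdev);	/* at init time */
	/* ... card is reset ... */
	pci_restore_state(sp->pdev);	/* as done above */
#endif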
/**
 *  s2io_set_swapper - to set the swapper control on the card
 *  @sp : private member of the device structure,
 *  pointer to the s2io_nic structure.
 *  Description: Function to set the swapper control on the card
 *  correctly depending on the 'endianness' of the system.
 *  Return value:
 *  SUCCESS on success and FAILURE on failure.
 */
static int s2io_set_swapper(struct s2io_nic * sp)
{
	struct net_device *dev = sp->dev;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64, valt, valr;

	/*
	 * Set proper endian settings and verify the same by reading
	 * the PIF Feed-back register.
	 */
	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		int i = 0;
		u64 value[] = { 0xC30000C3C30000C3ULL,	/* FE=1, SE=1 */
				0x8100008181000081ULL,	/* FE=1, SE=0 */
				0x4200004242000042ULL,	/* FE=0, SE=1 */
				0};			/* FE=0, SE=0 */

		while (i < 4) {
			writeq(value[i], &bar0->swapper_ctrl);
			val64 = readq(&bar0->pif_rd_swapper_fb);
			if (val64 == 0x0123456789ABCDEFULL)
				break;
			i++;
		}
		if (i == 4) {
			DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
				  dev->name);
			DBG_PRINT(ERR_DBG, "feedback read %llx\n",
				  (unsigned long long) val64);
			return FAILURE;
		}
	}
	valr = readq(&bar0->swapper_ctrl);

	valt = 0x0123456789ABCDEFULL;
	writeq(valt, &bar0->xmsi_address);
	val64 = readq(&bar0->xmsi_address);

	if (val64 != valt) {
		int i = 0;
		u64 value[] = { 0x00C3C30000C3C300ULL,	/* FE=1, SE=1 */
				0x0081810000818100ULL,	/* FE=1, SE=0 */
				0x0042420000424200ULL,	/* FE=0, SE=1 */
				0};			/* FE=0, SE=0 */

		while (i < 4) {
			writeq((value[i] | valr), &bar0->swapper_ctrl);
			writeq(valt, &bar0->xmsi_address);
			val64 = readq(&bar0->xmsi_address);
			if (val64 == valt)
				break;
			i++;
		}
		if (i == 4) {
			unsigned long long x = val64;
			DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
			DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
			return FAILURE;
		}
	}
	val64 = readq(&bar0->swapper_ctrl);
	val64 &= 0xFFFF000000000000ULL;

#ifdef __BIG_ENDIAN
	/*
	 * The device by default set to a big endian format, so a
	 * big endian driver need not set anything.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		  SWAPPER_CTRL_TXP_SE |
		  SWAPPER_CTRL_TXD_R_FE |
		  SWAPPER_CTRL_TXD_W_FE |
		  SWAPPER_CTRL_TXF_R_FE |
		  SWAPPER_CTRL_RXD_R_FE |
		  SWAPPER_CTRL_RXD_W_FE |
		  SWAPPER_CTRL_RXF_W_FE |
		  SWAPPER_CTRL_XMSI_FE |
		  SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
	if (sp->intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#else
	/*
	 * Initially we enable all bits to make it accessible by the
	 * driver, then we selectively enable only those bits that
	 * we want to set.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		  SWAPPER_CTRL_TXP_SE |
		  SWAPPER_CTRL_TXD_R_FE |
		  SWAPPER_CTRL_TXD_R_SE |
		  SWAPPER_CTRL_TXD_W_FE |
		  SWAPPER_CTRL_TXD_W_SE |
		  SWAPPER_CTRL_TXF_R_FE |
		  SWAPPER_CTRL_RXD_R_FE |
		  SWAPPER_CTRL_RXD_R_SE |
		  SWAPPER_CTRL_RXD_W_FE |
		  SWAPPER_CTRL_RXD_W_SE |
		  SWAPPER_CTRL_RXF_W_FE |
		  SWAPPER_CTRL_XMSI_FE |
		  SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
	if (sp->intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#endif
	val64 = readq(&bar0->swapper_ctrl);

	/*
	 * Verifying if endian settings are accurate by reading a
	 * feedback register.
	 */
	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		/* Endian settings are incorrect, calls for another dekko. */
		DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
			  dev->name);
		DBG_PRINT(ERR_DBG, "feedback read %llx\n",
			  (unsigned long long) val64);
		return FAILURE;
	}

	return SUCCESS;
}
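/*
 * Worked example for the feedback check above: the card returns the
 * pattern 0x0123456789ABCDEF when the swapper is programmed correctly.
 * If the host reads the register with no byte swapping configured on a
 * little-endian system, the same bytes come back reversed as
 * 0xEFCDAB8967452301, which is why the probe loops walk the four FE/SE
 * combinations until the pattern reads back intact.
 */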
static int wait_for_msix_trans(struct s2io_nic *nic, int i)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 val64;
	int ret = 0, cnt = 0;

	do {
		val64 = readq(&bar0->xmsi_access);
		if (!(val64 & BIT(15)))
			break;
		mdelay(1);
		cnt++;
	} while (cnt < 5);
	if (cnt == 5) {
		DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
		ret = 1;
	}

	return ret;
}
static void restore_xmsi_data(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 val64;
	int i;

	for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
		writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
		writeq(nic->msix_info[i].data, &bar0->xmsi_data);
		val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
		writeq(val64, &bar0->xmsi_access);
		if (wait_for_msix_trans(nic, i)) {
			DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
			continue;
		}
	}
}
static void store_xmsi_data(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 val64, addr, data;
	int i;

	/* Store and display */
	for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
		val64 = (BIT(15) | vBIT(i, 26, 6));
		writeq(val64, &bar0->xmsi_access);
		if (wait_for_msix_trans(nic, i)) {
			DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
			continue;
		}
		addr = readq(&bar0->xmsi_address);
		data = readq(&bar0->xmsi_data);
		if (addr && data) {
			nic->msix_info[i].addr = addr;
			nic->msix_info[i].data = data;
		}
	}
}
int s2io_enable_msi(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u16 msi_ctrl, msg_val;
	struct config_param *config = &nic->config;
	struct net_device *dev = nic->dev;
	u64 val64, tx_mat, rx_mat;
	int i, err;

	val64 = readq(&bar0->pic_control);
	val64 &= ~BIT(1);
	writeq(val64, &bar0->pic_control);

	err = pci_enable_msi(nic->pdev);
	if (err) {
		DBG_PRINT(ERR_DBG, "%s: enabling MSI failed\n",
			  nic->dev->name);
		return err;
	}

	/*
	 * Enable MSI and use MSI-1 instead of the standard MSI-0
	 * for interrupt handling.
	 */
	pci_read_config_word(nic->pdev, 0x4c, &msg_val);
	msg_val ^= 0x1;
	pci_write_config_word(nic->pdev, 0x4c, msg_val);
	pci_read_config_word(nic->pdev, 0x4c, &msg_val);

	pci_read_config_word(nic->pdev, 0x42, &msi_ctrl);
	msi_ctrl |= 0x10;
	pci_write_config_word(nic->pdev, 0x42, msi_ctrl);

	/* program MSI-1 into all usable Tx_Mat and Rx_Mat fields */
	tx_mat = readq(&bar0->tx_mat0_n[0]);
	for (i = 0; i < config->tx_fifo_num; i++)
		tx_mat |= TX_MAT_SET(i, 1);
	writeq(tx_mat, &bar0->tx_mat0_n[0]);

	rx_mat = readq(&bar0->rx_mat);
	for (i = 0; i < config->rx_ring_num; i++)
		rx_mat |= RX_MAT_SET(i, 1);
	writeq(rx_mat, &bar0->rx_mat);

	dev->irq = nic->pdev->irq;
	return 0;
}
static int s2io_enable_msi_x(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 tx_mat, rx_mat;
	u16 msi_control; /* Temp variable */
	int ret, i, j, msix_indx = 1;

	nic->entries = kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct msix_entry),
			       GFP_KERNEL);
	if (nic->entries == NULL) {
		DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
			  __FUNCTION__);
		return -ENOMEM;
	}
	memset(nic->entries, 0,
	       MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));

	nic->s2io_entries =
		kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry),
			GFP_KERNEL);
	if (nic->s2io_entries == NULL) {
		DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
			  __FUNCTION__);
		kfree(nic->entries);
		return -ENOMEM;
	}
	memset(nic->s2io_entries, 0,
	       MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));

	for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
		nic->entries[i].entry = i;
		nic->s2io_entries[i].entry = i;
		nic->s2io_entries[i].arg = NULL;
		nic->s2io_entries[i].in_use = 0;