/************************************************************************
 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
 * Copyright(c) 2002-2005 Neterion Inc.
 *
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice. This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * Credits:
 * Jeff Garzik		: For pointing out the improper error condition
 *			  check in the s2io_xmit routine and also some
 *			  issues in the Tx watchdog function. Also for
 *			  patiently answering all those innumerable
 *			  questions regarding the 2.6 porting issues.
 * Stephen Hemminger	: Providing proper 2.6 porting mechanism for some
 *			  macros available only in 2.6 Kernel.
 * Francois Romieu	: For pointing out all code parts that were
 *			  deprecated and also styling related comments.
 * Grant Grundler	: For helping me get rid of some Architecture
 *			  dependent code.
 * Christopher Hellwig	: Some more 2.6 specific issues in the driver.
 *
 * The module loadable parameters that are supported by the driver and a brief
 * explanation of all the variables:
 *
 * rx_ring_num : This can be used to program the number of receive rings used
 *     in the driver.
 * rx_ring_sz: This defines the number of receive blocks each ring can have.
 *     This is also an array of size 8.
 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
 *     values are 1, 2 and 3.
 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
 * tx_fifo_len: This too is an array of 8. Each element defines the number of
 *     Tx descriptors that can be associated with each corresponding FIFO.
 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
 *     1(MSI), 2(MSI_X). Default value is '0(INTA)'.
 * lro: Specifies whether to enable Large Receive Offload (LRO) or not.
 *     Possible values: '1' to enable, '0' to disable. Default is '0'.
 * lro_max_pkts: This parameter defines the maximum number of packets that can
 *     be aggregated as a single large packet.
 ************************************************************************/
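
/*
 * Example (illustrative values only, not a recommendation): loading the
 * driver with two Tx FIFOs, MSI-X interrupts and LRO enabled could look
 * like
 *
 *	modprobe s2io tx_fifo_num=2 intr_type=2 lro=1 lro_max_pkts=32
 *
 * Parameters that are not specified keep the defaults noted above.
 */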
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/stddef.h>
#include <linux/ioctl.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/ethtool.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/div64.h>

/* local include */
#include "s2io.h"
#include "s2io-regs.h"

#define DRV_VERSION "2.0.15.2"
/* S2io Driver name & version. */
static char s2io_driver_name[] = "Neterion";
static char s2io_driver_version[] = DRV_VERSION;

static int rxd_size[4] = {32, 48, 48, 64};
static int rxd_count[4] = {127, 85, 85, 63};
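/*
 * rxd_size[]/rxd_count[] describe the per-block RxD geometry for each
 * receive buffer mode: the descriptor size in bytes and the number of
 * usable descriptors per receive block. One extra slot per block (hence
 * the "rxd_count[mode] + 1" arithmetic used below) holds the link RxD
 * that chains a block to the next one.
 */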

static inline int RXD_IS_UP2DT(RxD_t *rxdp)
{
	int ret;

	ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
	       (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));

	return ret;
}

/*
 * Cards with the following subsystem_ids have a link state indication
 * problem: 600B, 600C, 600D, 640B, 640C and 640D.
 * The macro below identifies these cards given the subsystem_id.
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid)	\
	(((dev_type) == XFRAME_I_DEVICE) ?			\
	 (((((subid) >= 0x600B) && ((subid) <= 0x600D)) ||	\
	   (((subid) >= 0x640B) && ((subid) <= 0x640D))) ? 1 : 0) : 0)

#define LINK_IS_UP(val64) (!((val64) & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
					ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
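
/*
 * Classifies how full a receive ring currently is: PANIC when no more
 * than one block's worth of buffers is posted, LOW when the ring has
 * drained more than 16 buffers below its capacity (pkt_cnt), and 0
 * otherwise.
 */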
static inline int rx_buffer_level(nic_t *sp, int rxb_size, int ring)
{
	mac_info_t *mac_control;

	mac_control = &sp->mac_control;
	if (rxb_size <= rxd_count[sp->rxd_mode])
		return PANIC;
	else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16)
		return LOW;
	return 0;
}

/* Ethtool related variables and Macros. */
static char s2io_gstrings[][ETH_GSTRING_LEN] = {
	"Register test\t(offline)",
	"Eeprom test\t(offline)",
	"Link test\t(online)",
	"RLDRAM test\t(offline)",
	"BIST Test\t(offline)"
};

static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
	{"tmac_data_octets"},
	{"tmac_pause_ctrl_frms"},
	{"tmac_any_err_frms"},
	{"tmac_ttl_less_fb_octets"},
	{"tmac_vld_ip_octets"},
	{"rmac_data_octets"},
	{"rmac_fcs_err_frms"},
	{"rmac_vld_mcst_frms"},
	{"rmac_vld_bcst_frms"},
	{"rmac_in_rng_len_err_frms"},
	{"rmac_out_rng_len_err_frms"},
	{"rmac_pause_ctrl_frms"},
	{"rmac_unsup_ctrl_frms"},
	{"rmac_accepted_ucst_frms"},
	{"rmac_accepted_nucst_frms"},
	{"rmac_discarded_frms"},
	{"rmac_drop_events"},
	{"rmac_ttl_less_fb_octets"},
	{"rmac_usized_frms"},
	{"rmac_osized_frms"},
	{"rmac_jabber_frms"},
	{"rmac_ttl_64_frms"},
	{"rmac_ttl_65_127_frms"},
	{"rmac_ttl_128_255_frms"},
	{"rmac_ttl_256_511_frms"},
	{"rmac_ttl_512_1023_frms"},
	{"rmac_ttl_1024_1518_frms"},
	{"rmac_err_drp_udp"},
	{"rmac_xgmii_err_sym"},
	{"rmac_xgmii_data_err_cnt"},
	{"rmac_xgmii_ctrl_err_cnt"},
	{"rmac_accepted_ip"},
	{"new_rd_req_rtry_cnt"},
	{"wr_rtry_rd_ack_cnt"},
	{"new_wr_req_rtry_cnt"},
	{"rd_rtry_wr_ack_cnt"},
	{"rmac_ttl_1519_4095_frms"},
	{"rmac_ttl_4096_8191_frms"},
	{"rmac_ttl_8192_max_frms"},
	{"rmac_ttl_gt_max_frms"},
	{"rmac_osized_alt_frms"},
	{"rmac_jabber_alt_frms"},
	{"rmac_gt_max_alt_frms"},
	{"rmac_len_discard"},
	{"rmac_fcs_discard"},
	{"rmac_red_discard"},
	{"rmac_rts_discard"},
	{"rmac_ingm_full_discard"},
	{"\n DRIVER STATISTICS"},
	{"single_bit_ecc_errs"},
	{"double_bit_ecc_errs"},
	{"alarm_transceiver_temp_high"},
	{"alarm_transceiver_temp_low"},
	{"alarm_laser_bias_current_high"},
	{"alarm_laser_bias_current_low"},
	{"alarm_laser_output_power_high"},
	{"alarm_laser_output_power_low"},
	{"warn_transceiver_temp_high"},
	{"warn_transceiver_temp_low"},
	{"warn_laser_bias_current_high"},
	{"warn_laser_bias_current_low"},
	{"warn_laser_output_power_high"},
	{"warn_laser_output_power_low"},
	{"lro_aggregated_pkts"},
	{"lro_flush_both_count"},
	{"lro_out_of_sequence_pkts"},
	{"lro_flush_due_to_max_pkts"},
	{"lro_avg_aggr_pkts"},
};

#define S2IO_STAT_LEN		(sizeof(ethtool_stats_keys) / ETH_GSTRING_LEN)
#define S2IO_STAT_STRINGS_LEN	(S2IO_STAT_LEN * ETH_GSTRING_LEN)

#define S2IO_TEST_LEN		(sizeof(s2io_gstrings) / ETH_GSTRING_LEN)
#define S2IO_STRINGS_LEN	(S2IO_TEST_LEN * ETH_GSTRING_LEN)

#define S2IO_TIMER_CONF(timer, handle, arg, exp)	\
	do {						\
		init_timer(&timer);			\
		timer.function = handle;		\
		timer.data = (unsigned long)arg;	\
		mod_timer(&timer, (jiffies + exp));	\
	} while (0)
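
/*
 * Illustrative use of S2IO_TIMER_CONF (the timer field and handler name
 * here are hypothetical, not taken from this file):
 *
 *	S2IO_TIMER_CONF(sp->alarm_timer, alarm_intr_handler, sp, (HZ / 2));
 *
 * arms the timer so that alarm_intr_handler() runs half a second later
 * with 'sp' as its argument.
 */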

static void s2io_vlan_rx_register(struct net_device *dev,
				  struct vlan_group *grp)
{
	nic_t *nic = dev->priv;
	unsigned long flags;

	spin_lock_irqsave(&nic->tx_lock, flags);
	nic->vlgrp = grp;
	spin_unlock_irqrestore(&nic->tx_lock, flags);
}

/* Unregister the vlan */
static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
{
	nic_t *nic = dev->priv;
	unsigned long flags;

	spin_lock_irqsave(&nic->tx_lock, flags);
	if (nic->vlgrp)
		nic->vlgrp->vlan_devices[vid] = NULL;
	spin_unlock_irqrestore(&nic->tx_lock, flags);
}

/*
 * Constants to be programmed into the Xena's registers, to configure
 * the XAUI.
 */

static const u64 herc_act_dtx_cfg[] = {
	0x8000051536750000ULL, 0x80000515367500E0ULL,
	0x8000051536750004ULL, 0x80000515367500E4ULL,
	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
	0x801205150D440000ULL, 0x801205150D4400E0ULL,
	0x801205150D440004ULL, 0x801205150D4400E4ULL,
	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	END_SIGN
};

static const u64 xena_dtx_cfg[] = {
	0x8000051500000000ULL, 0x80000515000000E0ULL,
	0x80000515D9350004ULL, 0x80000515D93500E4ULL,
	0x8001051500000000ULL, 0x80010515000000E0ULL,
	0x80010515001E0004ULL, 0x80010515001E00E4ULL,
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	END_SIGN
};

/*
 * Constants for fixing the MAC address problem seen mostly on
 * Alpha machines.
 */
static const u64 fix_mac[] = {
	0x0060000000000000ULL, 0x0060600000000000ULL,
	0x0040600000000000ULL, 0x0000600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0000600000000000ULL,
	0x0040600000000000ULL, 0x0060600000000000ULL,
	END_SIGN
};

MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/* Module Loadable parameters. */
S2IO_PARM_INT(tx_fifo_num, 1);
S2IO_PARM_INT(rx_ring_num, 1);
S2IO_PARM_INT(rx_ring_mode, 1);
S2IO_PARM_INT(use_continuous_tx_intrs, 1);
S2IO_PARM_INT(rmac_pause_time, 0x100);
S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
S2IO_PARM_INT(shared_splits, 0);
S2IO_PARM_INT(tmac_util_period, 5);
S2IO_PARM_INT(rmac_util_period, 5);
S2IO_PARM_INT(bimodal, 0);
S2IO_PARM_INT(l3l4hdr_size, 128);
/* Frequency of Rx desc syncs expressed as power of 2 */
S2IO_PARM_INT(rxsync_frequency, 3);
/* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */
S2IO_PARM_INT(intr_type, 0);
/* Large receive offload feature */
S2IO_PARM_INT(lro, 0);
/* Max pkts to be aggregated by LRO at one time. If not specified,
 * aggregation happens until we hit max IP pkt size (64K)
 */
S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
S2IO_PARM_INT(indicate_max_pkts, 0);

S2IO_PARM_INT(napi, 1);
S2IO_PARM_INT(ufo, 0);

static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
    {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
static unsigned int rts_frm_len[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = 0 };
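
/*
 * The GNU range-designator syntax above, [a ... b] = v, initializes every
 * array element from index a through b to v; only FIFO 0 gets a distinct
 * default length, and all Rx rings start with the same block count.
 */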

module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);

/*
 * This table lists all the devices that this driver supports.
 */
static struct pci_device_id s2io_tbl[] __devinitdata = {
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{0,}
};

MODULE_DEVICE_TABLE(pci, s2io_tbl);

static struct pci_driver s2io_driver = {
	.name = "S2IO",
	.id_table = s2io_tbl,
	.probe = s2io_init_nic,
	.remove = __devexit_p(s2io_rem_nic),
};

/* A simplifier macro used both by init and free shared_mem Fns(). */
#define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
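/*
 * TXD_MEM_PAGE_CNT() is a ceiling division: for example,
 * TXD_MEM_PAGE_CNT(100, 32) == (100 + 31) / 32 == 4, i.e. 100 descriptor
 * lists at 32 lists per page occupy 4 pages.
 */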

/**
 * init_shared_mem - Allocation and Initialization of Memory
 * @nic: Device private variable.
 * Description: The function allocates all the memory areas shared
 * between the NIC and the driver. This includes Tx descriptors,
 * Rx descriptors and the statistics block.
 */

static int init_shared_mem(struct s2io_nic *nic)
{
	u32 size;
	void *tmp_v_addr, *tmp_v_addr_next;
	dma_addr_t tmp_p_addr, tmp_p_addr_next;
	RxD_block_t *pre_rxd_blk = NULL;
	int i, j, blk_cnt, rx_sz, tx_sz;
	int lst_size, lst_per_page;
	struct net_device *dev = nic->dev;
	unsigned long tmp;
	buffAdd_t *ba;

	mac_info_t *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* Allocation and initialization of TXDLs in FIFOs */
	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		size += config->tx_cfg[i].fifo_len;
	}
	if (size > MAX_AVAILABLE_TXDS) {
		DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
		DBG_PRINT(ERR_DBG, "Requested: %d, max supported: %d\n",
			  size, MAX_AVAILABLE_TXDS);
		return -EINVAL;
	}

	lst_size = (sizeof(TxD_t) * config->max_txds);
	tx_sz = lst_size * size;
	lst_per_page = PAGE_SIZE / lst_size;

	for (i = 0; i < config->tx_fifo_num; i++) {
		int fifo_len = config->tx_cfg[i].fifo_len;
		int list_holder_size = fifo_len * sizeof(list_info_hold_t);
		mac_control->fifos[i].list_info = kmalloc(list_holder_size,
							  GFP_KERNEL);
		if (!mac_control->fifos[i].list_info) {
			DBG_PRINT(ERR_DBG,
				  "Malloc failed for list_info\n");
			return -ENOMEM;
		}
		memset(mac_control->fifos[i].list_info, 0, list_holder_size);
	}
	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
						lst_per_page);
		mac_control->fifos[i].tx_curr_put_info.offset = 0;
		mac_control->fifos[i].tx_curr_put_info.fifo_len =
		    config->tx_cfg[i].fifo_len - 1;
		mac_control->fifos[i].tx_curr_get_info.offset = 0;
		mac_control->fifos[i].tx_curr_get_info.fifo_len =
		    config->tx_cfg[i].fifo_len - 1;
		mac_control->fifos[i].fifo_no = i;
		mac_control->fifos[i].nic = nic;
		mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;

		for (j = 0; j < page_num; j++) {
			int k = 0;
			dma_addr_t tmp_p;
			void *tmp_v;
			tmp_v = pci_alloc_consistent(nic->pdev,
						     PAGE_SIZE, &tmp_p);
			if (!tmp_v) {
				DBG_PRINT(ERR_DBG,
					  "pci_alloc_consistent ");
				DBG_PRINT(ERR_DBG, "failed for TxDL\n");
				return -ENOMEM;
			}
			/* If we got a zero DMA address (can happen on
			 * certain platforms like PPC), reallocate.
			 * Store virtual address of page we don't want,
			 * to be freed later.
			 */
			if (!tmp_p) {
				mac_control->zerodma_virt_addr = tmp_v;
				DBG_PRINT(INIT_DBG,
				    "%s: Zero DMA address for TxDL. ",
				    dev->name);
				DBG_PRINT(INIT_DBG,
				    "Virtual address %p\n", tmp_v);
				tmp_v = pci_alloc_consistent(nic->pdev,
							     PAGE_SIZE, &tmp_p);
				if (!tmp_v) {
					DBG_PRINT(ERR_DBG,
						  "pci_alloc_consistent ");
					DBG_PRINT(ERR_DBG, "failed for TxDL\n");
					return -ENOMEM;
				}
			}
			while (k < lst_per_page) {
				int l = (j * lst_per_page) + k;
				if (l == config->tx_cfg[i].fifo_len)
					break;
				mac_control->fifos[i].list_info[l].list_virt_addr =
				    tmp_v + (k * lst_size);
				mac_control->fifos[i].list_info[l].list_phy_addr =
				    tmp_p + (k * lst_size);
				k++;
			}
		}
	}

	nic->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
	if (!nic->ufo_in_band_v)
		return -ENOMEM;

	/* Allocation and initialization of RXDs in Rings */
	size = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		if (config->rx_cfg[i].num_rxd %
		    (rxd_count[nic->rxd_mode] + 1)) {
			DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
			DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
				  i);
			DBG_PRINT(ERR_DBG, "RxDs per Block\n");
			return FAILURE;
		}
		size += config->rx_cfg[i].num_rxd;
		mac_control->rings[i].block_count =
		    config->rx_cfg[i].num_rxd /
		    (rxd_count[nic->rxd_mode] + 1);
		mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
		    mac_control->rings[i].block_count;
	}
	if (nic->rxd_mode == RXD_MODE_1)
		size = (size * (sizeof(RxD1_t)));
	else
		size = (size * (sizeof(RxD3_t)));
	rx_sz = size;

	for (i = 0; i < config->rx_ring_num; i++) {
		mac_control->rings[i].rx_curr_get_info.block_index = 0;
		mac_control->rings[i].rx_curr_get_info.offset = 0;
		mac_control->rings[i].rx_curr_get_info.ring_len =
		    config->rx_cfg[i].num_rxd - 1;
		mac_control->rings[i].rx_curr_put_info.block_index = 0;
		mac_control->rings[i].rx_curr_put_info.offset = 0;
		mac_control->rings[i].rx_curr_put_info.ring_len =
		    config->rx_cfg[i].num_rxd - 1;
		mac_control->rings[i].nic = nic;
		mac_control->rings[i].ring_no = i;

		blk_cnt = config->rx_cfg[i].num_rxd /
		    (rxd_count[nic->rxd_mode] + 1);
		/* Allocating all the Rx blocks */
		for (j = 0; j < blk_cnt; j++) {
			rx_block_info_t *rx_blocks;
			int l;

			rx_blocks = &mac_control->rings[i].rx_blocks[j];
			size = SIZE_OF_BLOCK;	/* size is always page size */
			tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
							  &tmp_p_addr);
			if (tmp_v_addr == NULL) {
				/*
				 * In case of failure, free_shared_mem()
				 * is called, which should free any
				 * memory that was allocated till the
				 * failure happened.
				 */
				rx_blocks->block_virt_addr = tmp_v_addr;
				return -ENOMEM;
			}
			memset(tmp_v_addr, 0, size);
			rx_blocks->block_virt_addr = tmp_v_addr;
			rx_blocks->block_dma_addr = tmp_p_addr;
			rx_blocks->rxds = kmalloc(sizeof(rxd_info_t) *
						  rxd_count[nic->rxd_mode],
						  GFP_KERNEL);
			for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
				rx_blocks->rxds[l].virt_addr =
				    rx_blocks->block_virt_addr +
				    (rxd_size[nic->rxd_mode] * l);
				rx_blocks->rxds[l].dma_addr =
				    rx_blocks->block_dma_addr +
				    (rxd_size[nic->rxd_mode] * l);
			}
		}
		/* Interlinking all Rx Blocks */
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr =
			    mac_control->rings[i].rx_blocks[j].block_virt_addr;
			tmp_v_addr_next =
			    mac_control->rings[i].rx_blocks[(j + 1) %
					    blk_cnt].block_virt_addr;
			tmp_p_addr =
			    mac_control->rings[i].rx_blocks[j].block_dma_addr;
			tmp_p_addr_next =
			    mac_control->rings[i].rx_blocks[(j + 1) %
					    blk_cnt].block_dma_addr;

			pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
			pre_rxd_blk->reserved_2_pNext_RxD_block =
			    (unsigned long) tmp_v_addr_next;
			pre_rxd_blk->pNext_RxD_Blk_physical =
			    (u64) tmp_p_addr_next;
		}
	}

	if (nic->rxd_mode >= RXD_MODE_3A) {
		/*
		 * Allocation of Storages for buffer addresses in 2BUFF mode
		 * and the buffers as well.
		 */
		for (i = 0; i < config->rx_ring_num; i++) {
			blk_cnt = config->rx_cfg[i].num_rxd /
			    (rxd_count[nic->rxd_mode] + 1);
			mac_control->rings[i].ba =
			    kmalloc((sizeof(buffAdd_t *) * blk_cnt),
				    GFP_KERNEL);
			if (!mac_control->rings[i].ba)
				return -ENOMEM;
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;
				mac_control->rings[i].ba[j] =
				    kmalloc((sizeof(buffAdd_t) *
					     (rxd_count[nic->rxd_mode] + 1)),
					    GFP_KERNEL);
				if (!mac_control->rings[i].ba[j])
					return -ENOMEM;
				while (k != rxd_count[nic->rxd_mode]) {
					ba = &mac_control->rings[i].ba[j][k];

					ba->ba_0_org = (void *) kmalloc
					    (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
					if (!ba->ba_0_org)
						return -ENOMEM;
					tmp = (unsigned long)ba->ba_0_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long) ALIGN_SIZE);
					ba->ba_0 = (void *) tmp;

					ba->ba_1_org = (void *) kmalloc
					    (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
					if (!ba->ba_1_org)
						return -ENOMEM;
					tmp = (unsigned long) ba->ba_1_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long) ALIGN_SIZE);
					ba->ba_1 = (void *) tmp;
					k++;
				}
			}
		}
	}

	/* Allocation and initialization of Statistics block */
	size = sizeof(StatInfo_t);
	mac_control->stats_mem = pci_alloc_consistent
	    (nic->pdev, size, &mac_control->stats_mem_phy);

	if (!mac_control->stats_mem) {
		/*
		 * In case of failure, free_shared_mem() is called, which
		 * should free any memory that was allocated till the
		 * failure happened.
		 */
		return -ENOMEM;
	}
	mac_control->stats_mem_sz = size;

	tmp_v_addr = mac_control->stats_mem;
	mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
	memset(tmp_v_addr, 0, size);
	DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
		  (unsigned long long) tmp_p_addr);

	return SUCCESS;
}

/**
 * free_shared_mem - Free the allocated Memory
 * @nic: Device private variable.
 * Description: This function is to free all memory locations allocated by
 * the init_shared_mem() function and return it to the kernel.
 */

static void free_shared_mem(struct s2io_nic *nic)
{
	int i, j, blk_cnt, size;
	void *tmp_v_addr;
	dma_addr_t tmp_p_addr;
	mac_info_t *mac_control;
	struct config_param *config;
	int lst_size, lst_per_page;
	struct net_device *dev = nic->dev;

	if (!nic)
		return;

	mac_control = &nic->mac_control;
	config = &nic->config;

	lst_size = (sizeof(TxD_t) * config->max_txds);
	lst_per_page = PAGE_SIZE / lst_size;

	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
						lst_per_page);
		for (j = 0; j < page_num; j++) {
			int mem_blks = (j * lst_per_page);
			if (!mac_control->fifos[i].list_info)
				return;
			if (!mac_control->fifos[i].list_info[mem_blks].
			    list_virt_addr)
				break;
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    mac_control->fifos[i].
					    list_info[mem_blks].
					    list_virt_addr,
					    mac_control->fifos[i].
					    list_info[mem_blks].
					    list_phy_addr);
		}
		/* If we got a zero DMA address during allocation,
		 * free the page now
		 */
		if (mac_control->zerodma_virt_addr) {
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    mac_control->zerodma_virt_addr,
					    (dma_addr_t)0);
			DBG_PRINT(INIT_DBG,
				  "%s: Freeing TxDL with zero DMA addr. ",
				  dev->name);
			DBG_PRINT(INIT_DBG, "Virtual address %p\n",
				  mac_control->zerodma_virt_addr);
		}
		kfree(mac_control->fifos[i].list_info);
	}

	size = SIZE_OF_BLOCK;
	for (i = 0; i < config->rx_ring_num; i++) {
		blk_cnt = mac_control->rings[i].block_count;
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr = mac_control->rings[i].rx_blocks[j].
			    block_virt_addr;
			tmp_p_addr = mac_control->rings[i].rx_blocks[j].
			    block_dma_addr;
			if (tmp_v_addr == NULL)
				break;
			pci_free_consistent(nic->pdev, size,
					    tmp_v_addr, tmp_p_addr);
			kfree(mac_control->rings[i].rx_blocks[j].rxds);
		}
	}

	if (nic->rxd_mode >= RXD_MODE_3A) {
		/* Freeing buffer storage addresses in 2BUFF mode. */
		for (i = 0; i < config->rx_ring_num; i++) {
			blk_cnt = config->rx_cfg[i].num_rxd /
			    (rxd_count[nic->rxd_mode] + 1);
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;
				if (!mac_control->rings[i].ba[j])
					continue;
				while (k != rxd_count[nic->rxd_mode]) {
					buffAdd_t *ba =
					    &mac_control->rings[i].ba[j][k];
					kfree(ba->ba_0_org);
					kfree(ba->ba_1_org);
					k++;
				}
				kfree(mac_control->rings[i].ba[j]);
			}
			kfree(mac_control->rings[i].ba);
		}
	}

	if (mac_control->stats_mem) {
		pci_free_consistent(nic->pdev,
				    mac_control->stats_mem_sz,
				    mac_control->stats_mem,
				    mac_control->stats_mem_phy);
	}
	/* kfree() of a NULL pointer is a no-op, so no check is needed */
	kfree(nic->ufo_in_band_v);
}

/**
 * s2io_verify_pci_mode - Check which PCI mode the adapter reports.
 */

static int s2io_verify_pci_mode(nic_t *nic)
{
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int mode;

	val64 = readq(&bar0->pci_mode);
	mode = (u8) GET_PCI_MODE(val64);

	if (val64 & PCI_MODE_UNKNOWN_MODE)
		return -1;	/* Unknown PCI mode */
	return mode;
}

#define NEC_VENID   0x1033
#define NEC_DEVID   0x0125
static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
{
	struct pci_dev *tdev = NULL;
	while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
		if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
			if (tdev->bus == s2io_pdev->bus->parent) {
				pci_dev_put(tdev);
				return 1;
			}
		}
	}
	return 0;
}
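
/*
 * Effective bus speed in MHz, indexed by the PCI mode value read from the
 * adapter's pci_mode register. The PCI-X mode-2 entries are double the
 * mode-1 clock, matching the strings printed in s2io_print_pci_mode().
 */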
static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};

/**
 * s2io_print_pci_mode - Print the bus width and speed the adapter is on.
 */
static int s2io_print_pci_mode(nic_t *nic)
{
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int mode;
	struct config_param *config = &nic->config;

	val64 = readq(&bar0->pci_mode);
	mode = (u8) GET_PCI_MODE(val64);

	if (val64 & PCI_MODE_UNKNOWN_MODE)
		return -1;	/* Unknown PCI mode */

	config->bus_speed = bus_speed[mode];

	if (s2io_on_nec_bridge(nic->pdev)) {
		DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
			  nic->dev->name);
		return mode;
	}

	if (val64 & PCI_MODE_32_BITS)
		DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
	else
		DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);

	switch (mode) {
	case PCI_MODE_PCI_33:
		DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
		break;
	case PCI_MODE_PCI_66:
		DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
		break;
	case PCI_MODE_PCIX_M1_66:
		DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
		break;
	case PCI_MODE_PCIX_M1_100:
		DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
		break;
	case PCI_MODE_PCIX_M1_133:
		DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
		break;
	case PCI_MODE_PCIX_M2_66:
		DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
		break;
	case PCI_MODE_PCIX_M2_100:
		DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
		break;
	case PCI_MODE_PCIX_M2_133:
		DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
		break;
	default:
		return -1;	/* Unsupported bus speed */
	}

	return mode;
}

/**
 * init_nic - Initialization of hardware
 * @nic: device private variable
 * Description: The function sequentially configures every block
 * of the H/W from their reset values.
 * Return Value: SUCCESS on success and
 * '-1' on failure (endian settings incorrect).
 */

static int init_nic(struct s2io_nic *nic)
{
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	void __iomem *add;
	u32 time;
	int i, j;
	mac_info_t *mac_control;
	struct config_param *config;
	int dtx_cnt = 0;
	unsigned long long mem_share;
	int mem_size;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* to set the swapper control on the card */
	if (s2io_set_swapper(nic)) {
		DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
		return -1;
	}

	/*
	 * Herc requires EOI to be removed from reset before XGXS, so..
	 */
	if (nic->device_type & XFRAME_II_DEVICE) {
		val64 = 0xA500000000ULL;
		writeq(val64, &bar0->sw_reset);
		msleep(500);
		val64 = readq(&bar0->sw_reset);
	}

	/* Remove XGXS from reset state */
	val64 = 0;
	writeq(val64, &bar0->sw_reset);
	msleep(500);
	val64 = readq(&bar0->sw_reset);

	/* Enable Receiving broadcasts */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 |= MAC_RMAC_BCAST_ENABLE;
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) val64, add);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64 >> 32), (add + 4));

	/* Read registers in all blocks */
	val64 = readq(&bar0->mac_int_mask);
	val64 = readq(&bar0->mc_int_mask);
	val64 = readq(&bar0->xgxs_int_mask);

	/* Set MTU */
	val64 = dev->mtu;
	writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);

	if (nic->device_type & XFRAME_II_DEVICE) {
		while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
			SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
					  &bar0->dtx_control, UF);
			dtx_cnt++;
			msleep(1);	/* Necessary!! */
		}
	} else {
		while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
			SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
					  &bar0->dtx_control, UF);
			val64 = readq(&bar0->dtx_control);
			dtx_cnt++;
		}
	}

	/* Tx DMA Initialization */
	val64 = 0;
	writeq(val64, &bar0->tx_fifo_partition_0);
	writeq(val64, &bar0->tx_fifo_partition_1);
	writeq(val64, &bar0->tx_fifo_partition_2);
	writeq(val64, &bar0->tx_fifo_partition_3);

	for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
		val64 |=
		    vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
			 13) | vBIT(config->tx_cfg[i].fifo_priority,
				    ((i * 32) + 5), 3);

		if (i == (config->tx_fifo_num - 1)) {
			if (i % 2 == 0)
				i++;
		}

		switch (i) {
		case 1:
			writeq(val64, &bar0->tx_fifo_partition_0);
			val64 = 0;
			break;
		case 3:
			writeq(val64, &bar0->tx_fifo_partition_1);
			val64 = 0;
			break;
		case 5:
			writeq(val64, &bar0->tx_fifo_partition_2);
			val64 = 0;
			break;
		case 7:
			writeq(val64, &bar0->tx_fifo_partition_3);
			break;
		}
	}

	/*
	 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
	 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
	 */
	if ((nic->device_type == XFRAME_I_DEVICE) &&
	    (get_xena_rev_id(nic->pdev) < 4))
		writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);

	val64 = readq(&bar0->tx_fifo_partition_0);
	DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
		  &bar0->tx_fifo_partition_0, (unsigned long long) val64);

	/*
	 * Initialization of Tx_PA_CONFIG register to ignore packet
	 * integrity checking.
	 */
	val64 = readq(&bar0->tx_pa_cfg);
	val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
	    TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
	writeq(val64, &bar0->tx_pa_cfg);

	/* Rx DMA initialization. */
	val64 = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		val64 |=
		    vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
			 3);
	}
	writeq(val64, &bar0->rx_queue_priority);

	/*
	 * Allocating equal share of memory to all the
	 * configured Rings.
	 */
	val64 = 0;
	if (nic->device_type & XFRAME_II_DEVICE)
		mem_size = 32;
	else
		mem_size = 64;

	for (i = 0; i < config->rx_ring_num; i++) {
		switch (i) {
		case 0:
			mem_share = (mem_size / config->rx_ring_num +
				     mem_size % config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
			continue;
		case 1:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
			continue;
		case 2:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
			continue;
		case 3:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
			continue;
		case 4:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
			continue;
		case 5:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
			continue;
		case 6:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
			continue;
		case 7:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
			continue;
		}
	}
	writeq(val64, &bar0->rx_queue_cfg);

	/*
	 * Filling Tx round robin registers
	 * as per the number of FIFOs
	 */
	switch (config->tx_fifo_num) {
	case 1:
		val64 = 0x0000000000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 2:
		val64 = 0x0000010000010000ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0100000100000100ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0001000001000001ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0000010000010000ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0100000000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 3:
		val64 = 0x0001000102000001ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0001020000010001ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0200000100010200ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001000102000001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001020000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 4:
		val64 = 0x0001020300010200ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0100000102030001ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0200010000010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001020001000001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0203000100000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 5:
		val64 = 0x0001000203000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0001020001030004ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0001000203000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001020001030004ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001000000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 6:
		val64 = 0x0001020304000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0304050001020001ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0203000100000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0304000102030405ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001000200000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 7:
		val64 = 0x0001020001020300ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0102030400010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0405060001020001ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0304050000010200ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0102030000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 8:
		val64 = 0x0001020300040105ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0200030106000204ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0103000502010007ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0304010002060500ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0103020400000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	}

	/* Enable all configured Tx FIFO partitions */
	val64 = readq(&bar0->tx_fifo_partition_0);
	val64 |= (TX_FIFO_PARTITION_EN);
	writeq(val64, &bar0->tx_fifo_partition_0);

	/* Filling the Rx round robin registers as per the
	 * number of Rings and steering based on QoS.
	 */
	switch (config->rx_ring_num) {
	case 1:
		val64 = 0x8080808080808080ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 2:
		val64 = 0x0000010000010000ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0100000100000100ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0001000001000001ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0000010000010000ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0100000000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080808040404040ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 3:
		val64 = 0x0001000102000001ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0001020000010001ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0200000100010200ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001000102000001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001020000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080804040402020ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 4:
		val64 = 0x0001020300010200ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0100000102030001ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0200010000010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001020001000001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0203000100000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020201010ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 5:
		val64 = 0x0001000203000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0001020001030004ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0001000203000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001020001030004ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001000000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020201008ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 6:
		val64 = 0x0001020304000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0304050001020001ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0203000100000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0304000102030405ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001000200000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020100804ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 7:
		val64 = 0x0001020001020300ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0102030400010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0405060001020001ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0304050000010200ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0102030000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080402010080402ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 8:
		val64 = 0x0001020300040105ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0200030106000204ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0103000502010007ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0304010002060500ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0103020400000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8040201008040201ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	}

	/* UDP Fix */
	val64 = 0;
	for (i = 0; i < 8; i++)
		writeq(val64, &bar0->rts_frm_len_n[i]);

	/* Set the default rts frame length for the rings configured */
	val64 = MAC_RTS_FRM_LEN_SET(dev->mtu + 22);
	for (i = 0; i < config->rx_ring_num; i++)
		writeq(val64, &bar0->rts_frm_len_n[i]);

	/* Set the frame length for the configured rings
	 * desired by the user
	 */
	for (i = 0; i < config->rx_ring_num; i++) {
		/* If rts_frm_len[i] == 0 then it is assumed that the user
		 * has not specified frame length steering.
		 * If the user provides a frame length then program
		 * the rts_frm_len register for those values or else
		 * leave it as it is.
		 */
		if (rts_frm_len[i] != 0) {
			writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
			       &bar0->rts_frm_len_n[i]);
		}
	}

	/* Program statistics memory */
	writeq(mac_control->stats_mem_phy, &bar0->stat_addr);

	if (nic->device_type == XFRAME_II_DEVICE) {
		val64 = STAT_BC(0x320);
		writeq(val64, &bar0->stat_byte_cnt);
	}

	/*
	 * Initializing the sampling rate for the device to calculate the
	 * bandwidth utilization.
	 */
	val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
	    MAC_RX_LINK_UTIL_VAL(rmac_util_period);
	writeq(val64, &bar0->mac_link_util);

	/*
	 * Initializing the Transmit and Receive Traffic Interrupt
	 * Scheme.
	 */
	/*
	 * TTI Initialization. Default Tx timer gets us about
	 * 250 interrupts per sec. Continuous interrupts are enabled
	 * by default.
	 */
	if (nic->device_type == XFRAME_II_DEVICE) {
		int count = (nic->config.bus_speed * 125) / 2;
		val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
	} else {
		val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
	}
	val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
	    TTI_DATA1_MEM_TX_URNG_B(0x10) |
	    TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
	if (use_continuous_tx_intrs)
		val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
	writeq(val64, &bar0->tti_data1_mem);

	val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
	    TTI_DATA2_MEM_TX_UFC_B(0x20) |
	    TTI_DATA2_MEM_TX_UFC_C(0x70) | TTI_DATA2_MEM_TX_UFC_D(0x80);
	writeq(val64, &bar0->tti_data2_mem);

	val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
	writeq(val64, &bar0->tti_command_mem);

	/*
	 * Once the operation completes, the Strobe bit of the command
	 * register will be reset. We poll for this particular condition.
	 * We wait for a maximum of 500ms for the operation to complete,
	 * if it's not complete by then we return error.
	 */
	time = 0;
	while (TRUE) {
		val64 = readq(&bar0->tti_command_mem);
		if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD))
			break;
		if (time > 10) {
			DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
				  dev->name);
			return -1;
		}
		time++;
		msleep(50);
	}

	if (nic->config.bimodal) {
		int k;
		for (k = 0; k < config->rx_ring_num; k++) {
			val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
			val64 |= TTI_CMD_MEM_OFFSET(0x38 + k);
			writeq(val64, &bar0->tti_command_mem);

			/*
			 * Once the operation completes, the Strobe bit of the
			 * command register will be reset. We poll for this
			 * particular condition. We wait for a maximum of 500ms
			 * for the operation to complete, if it's not complete
			 * by then we return error.
			 */
			time = 0;
			while (TRUE) {
				val64 = readq(&bar0->tti_command_mem);
				if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD))
					break;
				if (time > 10) {
					DBG_PRINT(ERR_DBG,
						  "%s: TTI init Failed\n",
						  dev->name);
					return -1;
				}
				time++;
				msleep(50);
			}
		}
	} else {
		/* RTI Initialization */
		if (nic->device_type == XFRAME_II_DEVICE) {
			/*
			 * Programmed to generate approximately 500
			 * interrupts per second.
			 */
			int count = (nic->config.bus_speed * 125) / 4;
			val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
		} else {
			val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
		}
		val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
		    RTI_DATA1_MEM_RX_URNG_B(0x10) |
		    RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;

		writeq(val64, &bar0->rti_data1_mem);

		val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
		    RTI_DATA2_MEM_RX_UFC_B(0x2);
		if (nic->intr_type == MSI_X)
			val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) |
				  RTI_DATA2_MEM_RX_UFC_D(0x40));
		else
			val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) |
				  RTI_DATA2_MEM_RX_UFC_D(0x80));
		writeq(val64, &bar0->rti_data2_mem);

		for (i = 0; i < config->rx_ring_num; i++) {
			val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
			    | RTI_CMD_MEM_OFFSET(i);
			writeq(val64, &bar0->rti_command_mem);

			/*
			 * Once the operation completes, the Strobe bit of the
			 * command register will be reset. We poll for this
			 * particular condition. We wait for a maximum of 500ms
			 * for the operation to complete, if it's not complete
			 * by then we return error.
			 */
			time = 0;
			while (TRUE) {
				val64 = readq(&bar0->rti_command_mem);
				if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
					break;
				if (time > 10) {
					DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
						  dev->name);
					return -1;
				}
				time++;
				msleep(50);
			}
		}
	}

	/*
	 * Initializing proper values as Pause threshold into all
	 * the 8 Queues on Rx side.
	 */
	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);

	/* Disable RMAC PAD STRIPPING */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64), add);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64 >> 32), (add + 4));
	val64 = readq(&bar0->mac_cfg);

	/* Enable FCS stripping by adapter */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 |= MAC_CFG_RMAC_STRIP_FCS;
	if (nic->device_type == XFRAME_II_DEVICE)
		writeq(val64, &bar0->mac_cfg);
	else {
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64), add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));
	}

	/*
	 * Set the time value to be inserted in the pause frame
	 * generated by Xena.
	 */
	val64 = readq(&bar0->rmac_pause_cfg);
	val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
	val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
	writeq(val64, &bar0->rmac_pause_cfg);

	/*
	 * Set the Threshold Limit for Generating the pause frame.
	 * If the amount of data in any Queue exceeds the ratio of
	 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256, a
	 * pause frame is generated.
	 */
	val64 = 0;
	for (i = 0; i < 4; i++) {
		val64 |=
		    (((u64) 0xFF00 | nic->mac_control.
		      mc_pause_threshold_q0q3)
		     << (i * 2 * 8));
	}
	writeq(val64, &bar0->mc_pause_thresh_q0q3);

	val64 = 0;
	for (i = 0; i < 4; i++) {
		val64 |=
		    (((u64) 0xFF00 | nic->mac_control.
		      mc_pause_threshold_q4q7)
		     << (i * 2 * 8));
	}
	writeq(val64, &bar0->mc_pause_thresh_q4q7);

	/*
	 * TxDMA will stop Read request if the number of read splits has
	 * exceeded the limit pointed by shared_splits.
	 */
	val64 = readq(&bar0->pic_control);
	val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
	writeq(val64, &bar0->pic_control);

	if (nic->config.bus_speed == 266) {
		writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
		writeq(0x0, &bar0->read_retry_delay);
		writeq(0x0, &bar0->write_retry_delay);
	}

	/*
	 * Programming the Herc to split every write transaction
	 * that does not start on an ADB to reduce disconnects.
	 */
	if (nic->device_type == XFRAME_II_DEVICE) {
		val64 = EXT_REQ_EN | MISC_LINK_STABILITY_PRD(3);
		writeq(val64, &bar0->misc_control);
		val64 = readq(&bar0->pic_control2);
		val64 &= ~(BIT(13) | BIT(14) | BIT(15));
		writeq(val64, &bar0->pic_control2);
	}
	if (strstr(nic->product_name, "CX4")) {
		val64 = TMAC_AVG_IPG(0x17);
		writeq(val64, &bar0->tmac_avg_ipg);
	}

	return SUCCESS;
}

#define LINK_UP_DOWN_INTERRUPT		1
#define MAC_RMAC_ERR_TIMER		2

static int s2io_link_fault_indication(nic_t *nic)
{
	if (nic->intr_type != INTA)
		return MAC_RMAC_ERR_TIMER;
	if (nic->device_type == XFRAME_II_DEVICE)
		return LINK_UP_DOWN_INTERRUPT;
	else
		return MAC_RMAC_ERR_TIMER;
}

/**
 * en_dis_able_nic_intrs - Enable or Disable the interrupts
 * @nic: device private variable,
 * @mask: A mask indicating which Intr block must be modified and,
 * @flag: A flag indicating whether to enable or disable the Intrs.
 * Description: This function will either disable or enable the interrupts
 * depending on the flag argument. The mask argument can be used to
 * enable/disable any Intr block.
 * Return Value: NONE.
 */

static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
{
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	register u64 val64 = 0, temp64 = 0;

	/* Top level interrupt classification */
	/* PIC Interrupts */
	if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
		/* Enable PIC Intrs in the general intr mask register */
		val64 = TXPIC_INT_M | PIC_RX_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * If Hercules adapter enable GPIO otherwise
			 * disable all PCIX, Flash, MDIO, IIC and GPIO
			 * interrupts for now.
			 */
			if (s2io_link_fault_indication(nic) ==
			    LINK_UP_DOWN_INTERRUPT) {
				temp64 = readq(&bar0->pic_int_mask);
				temp64 &= ~((u64) PIC_INT_GPIO);
				writeq(temp64, &bar0->pic_int_mask);
				temp64 = readq(&bar0->gpio_int_mask);
				temp64 &= ~((u64) GPIO_INT_MASK_LINK_UP);
				writeq(temp64, &bar0->gpio_int_mask);
			} else {
				writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
			}
			/*
			 * No MSI Support is available presently, so TTI and
			 * RTI interrupts are also disabled.
			 */
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable PIC Intrs in the general
			 * intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/* DMA Interrupts */
	/* Enabling/Disabling Tx DMA interrupts */
	if (mask & TX_DMA_INTR) {
		/* Enable TxDMA Intrs in the general intr mask register */
		val64 = TXDMA_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * Keep all interrupts other than PFC interrupt
			 * and PCC interrupt disabled in DMA level.
			 */
			val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
						      TXDMA_PCC_INT_M);
			writeq(val64, &bar0->txdma_int_mask);
			/*
			 * Enable only the MISC error 1 interrupt in PFC block
			 */
			val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
			writeq(val64, &bar0->pfc_err_mask);
			/*
			 * Enable only the FB_ECC error interrupt in PCC block
			 */
			val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
			writeq(val64, &bar0->pcc_err_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable TxDMA Intrs in the general intr mask
			 * register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
			writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/* Enabling/Disabling Rx DMA interrupts */
	if (mask & RX_DMA_INTR) {
		/* Enable RxDMA Intrs in the general intr mask register */
		val64 = RXDMA_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * All RxDMA block interrupts are disabled for now
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable RxDMA Intrs in the general intr mask
			 * register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/* MAC Interrupts */
	/* Enabling/Disabling MAC interrupts */
	if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
		val64 = TXMAC_INT_M | RXMAC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * All MAC block error interrupts are disabled for now
			 */
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable MAC Intrs in the general intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
			writeq(DISABLE_ALL_INTRS,
			       &bar0->mac_rmac_err_mask);

			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/* XGXS Interrupts */
	if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
		val64 = TXXGXS_INT_M | RXXGXS_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * All XGXS block error interrupts are disabled for now
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable XGXS Intrs in the general intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/* Memory Controller(MC) interrupts */
	if (mask & MC_INTR) {
		val64 = MC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * Enable all MC Intrs.
			 */
			writeq(0x0, &bar0->mc_int_mask);
			writeq(0x0, &bar0->mc_err_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable MC Intrs in the general intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/* Tx traffic interrupts */
	if (mask & TX_TRAFFIC_INTR) {
		val64 = TXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * Enable all the Tx side interrupts;
			 * writing 0 enables all 64 TX interrupt levels.
			 */
			writeq(0x0, &bar0->tx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Tx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/* Rx traffic interrupts */
	if (mask & RX_TRAFFIC_INTR) {
		val64 = RXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/* writing 0 enables all 8 RX interrupt levels */
			writeq(0x0, &bar0->rx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Rx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}
}

static int check_prc_pcc_state(u64 val64, int flag, int rev_id, int herc)
{
	int ret = 0;

	if (flag == FALSE) {
		if ((!herc && (rev_id >= 4)) || herc) {
			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
			    ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
			     ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
				ret = 1;
			}
		} else {
			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
			    ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
			     ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
				ret = 1;
			}
		}
	} else {
		if ((!herc && (rev_id >= 4)) || herc) {
			if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
			     ADAPTER_STATUS_RMAC_PCC_IDLE) &&
			    (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
			     ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
			      ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
				ret = 1;
			}
		} else {
			if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
			     ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
			    (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
			     ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
			      ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
				ret = 1;
			}
		}
	}

	return ret;
}

/**
 * verify_xena_quiescence - Checks whether the H/W is ready
 * @val64: Value read from adapter status register.
 * @flag: Indicates if the adapter enable bit was ever written once
 * before.
 * Description: Returns whether the H/W is ready to go or not. Depending
 * on whether the adapter enable bit was written or not the comparison
 * differs and the calling function passes the input argument flag to
 * indicate this.
 * Return: 1 if Xena is quiescent,
 *         0 if Xena is not quiescent.
 */

static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag)
{
	int ret = 0, herc;
	u64 tmp64 = ~((u64) val64);
	int rev_id = get_xena_rev_id(sp->pdev);

	herc = (sp->device_type == XFRAME_II_DEVICE);
	if (!(tmp64 &
	      (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
	       ADAPTER_STATUS_PFC_READY | ADAPTER_STATUS_TMAC_BUF_EMPTY |
	       ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
	       ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
	       ADAPTER_STATUS_P_PLL_LOCK))) {
		ret = check_prc_pcc_state(val64, flag, rev_id, herc);
	}

	return ret;
}

/**
 * fix_mac_address - Fix for MAC addr problem on Alpha platforms
 * @sp: Pointer to device specific structure
 * Description: New procedure to clear MAC address reading problems
 * on Alpha platforms
 */

static void fix_mac_address(nic_t *sp)
{
	XENA_dev_config_t __iomem *bar0 = sp->bar0;
	u64 val64;
	int i = 0;

	while (fix_mac[i] != END_SIGN) {
		writeq(fix_mac[i++], &bar0->gpio_control);
		udelay(10);
		val64 = readq(&bar0->gpio_control);
	}
}

/**
 * start_nic - Turns the device on
 * @nic : device private variable.
 * Description:
 * This function actually turns the device on. Before this function is
 * called, all Registers are configured from their reset states
 * and shared memory is allocated but the NIC is still quiescent. On
 * calling this function, the device interrupts are cleared and the NIC is
 * literally switched on by writing into the adapter control register.
 * Return Value:
 * SUCCESS on success and -1 on failure.
 */

static int start_nic(struct s2io_nic *nic)
{
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	u16 subid, i;
	mac_info_t *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* PRC Initialization and configuration */
	for (i = 0; i < config->rx_ring_num; i++) {
		writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
		       &bar0->prc_rxd0_n[i]);

		val64 = readq(&bar0->prc_ctrl_n[i]);
		if (nic->config.bimodal)
			val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
		if (nic->rxd_mode == RXD_MODE_1)
			val64 |= PRC_CTRL_RC_ENABLED;
		else
			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
		if (nic->device_type == XFRAME_II_DEVICE)
			val64 |= PRC_CTRL_GROUP_READS;
		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
		writeq(val64, &bar0->prc_ctrl_n[i]);
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
		writeq(val64, &bar0->rx_pa_cfg);
	}

	/*
	 * Enabling MC-RLDRAM. After enabling the device, we timeout
	 * for around 100ms, which is approximately the time required
	 * for the device to be ready for operation.
	 */
	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
	val64 = readq(&bar0->mc_rldram_mrs);

	msleep(100);	/* Delay by around 100 ms. */

	/* Enabling ECC Protection. */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/*
	 * Clearing any possible Link state change interrupts that
	 * could have popped up just before Enabling the card.
	 */
	val64 = readq(&bar0->mac_rmac_err_reg);
	if (val64)
		writeq(val64, &bar0->mac_rmac_err_reg);

	/*
	 * Verify if the device is ready to be enabled, if so enable
	 * it.
	 */
	val64 = readq(&bar0->adapter_status);
	if (!verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
		DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
		DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
			  (unsigned long long) val64);
		return FAILURE;
	}

	/*
	 * With some switches, link might be already up at this point.
	 * Because of this weird behavior, when we enable laser,
	 * we may not get link. We need to handle this. We cannot
	 * figure out which switch is misbehaving. So we are forced to
	 * make a global change.
	 */

	/* Enabling Laser. */
	val64 = readq(&bar0->adapter_control);
	val64 |= ADAPTER_EOI_TX_ON;
	writeq(val64, &bar0->adapter_control);

	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Don't see link state interrupts initially on some switches,
		 * so directly scheduling the link state task here.
		 */
		schedule_work(&nic->set_link_task);
	}
	/* SXE-002: Initialize link and activity LED */
	subid = nic->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (nic->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	return SUCCESS;
}

/**
 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
 */
static struct sk_buff *s2io_txdl_getskb(fifo_info_t *fifo_data, TxD_t *txdlp,
					int get_off)
{
	nic_t *nic = fifo_data->nic;
	struct sk_buff *skb;
	TxD_t *txds;
	u16 j, frg_cnt;

	txds = txdlp;
	if (txds->Host_Control == (u64)(long)nic->ufo_in_band_v) {
		pci_unmap_single(nic->pdev, (dma_addr_t)
				 txds->Buffer_Pointer, sizeof(u64),
				 PCI_DMA_TODEVICE);
		txds++;
	}

	skb = (struct sk_buff *) ((unsigned long)
				  txds->Host_Control);
	if (!skb) {
		memset(txdlp, 0, (sizeof(TxD_t) * fifo_data->max_txds));
		return NULL;
	}
	pci_unmap_single(nic->pdev, (dma_addr_t)
			 txds->Buffer_Pointer,
			 skb->len - skb->data_len,
			 PCI_DMA_TODEVICE);
	frg_cnt = skb_shinfo(skb)->nr_frags;
	if (frg_cnt) {
		txds++;
		for (j = 0; j < frg_cnt; j++, txds++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			if (!txds->Buffer_Pointer)
				break;
			pci_unmap_page(nic->pdev, (dma_addr_t)
				       txds->Buffer_Pointer,
				       frag->size, PCI_DMA_TODEVICE);
		}
	}
	memset(txdlp, 0, (sizeof(TxD_t) * fifo_data->max_txds));
	return skb;
}

/**
 * free_tx_buffers - Free all queued Tx buffers
 * @nic : device private variable.
 * Description:
 * Free all queued Tx buffers.
 * Return Value: void
 */

static void free_tx_buffers(struct s2io_nic *nic)
{
	struct net_device *dev = nic->dev;
	struct sk_buff *skb;
	TxD_t *txdp;
	int i, j, cnt;
	mac_info_t *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	for (i = 0; i < config->tx_fifo_num; i++) {
		cnt = 0;
		for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
			txdp = (TxD_t *) mac_control->fifos[i].list_info[j].
			    list_virt_addr;
			skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
			if (skb) {
				dev_kfree_skb(skb);
				cnt++;
			}
		}
		DBG_PRINT(INTR_DBG,
			  "%s: forcibly freeing %d skbs on FIFO%d\n",
			  dev->name, cnt, i);
		mac_control->fifos[i].tx_curr_get_info.offset = 0;
		mac_control->fifos[i].tx_curr_put_info.offset = 0;
	}
}

/**
 * stop_nic - To stop the nic
 * @nic : device private variable.
 * Description:
 * This function does exactly the opposite of what the start_nic()
 * function does. This function is called to stop the device.
 * Return Value:
 * void.
 */

static void stop_nic(struct s2io_nic *nic)
{
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	u16 interruptible;
	mac_info_t *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* Disable all interrupts */
	interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
	interruptible |= TX_PIC_INTR | RX_PIC_INTR;
	interruptible |= TX_MAC_INTR | RX_MAC_INTR;
	en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);

	/* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~(ADAPTER_CNTL_EN);
	writeq(val64, &bar0->adapter_control);
}

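/*
 * fill_rxd_3buf() populates buffers 1 and 2 of a 3-buffer mode RxD:
 * Buffer-1 points at skb->data for the L3/L4 headers, while Buffer-2
 * points at a freshly allocated skb chained on frag_list that receives
 * the L4 payload. Buffer-0 (the small header buffer) is mapped by the
 * caller, fill_rx_buffers().
 */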
static int fill_rxd_3buf(nic_t *nic, RxD_t *rxdp, struct sk_buff *skb)
{
	struct net_device *dev = nic->dev;
	struct sk_buff *frag_list;
	void *tmp;

	/* Buffer-1 receives L3/L4 headers */
	((RxD3_t*)rxdp)->Buffer1_ptr = pci_map_single
			(nic->pdev, skb->data, l3l4hdr_size + 4,
			PCI_DMA_FROMDEVICE);

	/* skb_shinfo(skb)->frag_list will have L4 data payload */
	skb_shinfo(skb)->frag_list = dev_alloc_skb(dev->mtu + ALIGN_SIZE);
	if (skb_shinfo(skb)->frag_list == NULL) {
		DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n", dev->name);
		return -ENOMEM;
	}
	frag_list = skb_shinfo(skb)->frag_list;
	frag_list->next = NULL;
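	/*
	 * Align the payload buffer on an (ALIGN_SIZE + 1)-byte boundary;
	 * ALIGN_SIZE is assumed to be of the form 2^n - 1, so ALIGN() with
	 * ALIGN_SIZE + 1 yields, e.g., 128-byte alignment for 127.
	 */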
	tmp = (void *)ALIGN((long)frag_list->data, ALIGN_SIZE + 1);
	frag_list->data = tmp;
	frag_list->tail = tmp;

	/* Buffer-2 receives L4 data payload */
	((RxD3_t*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev,
				frag_list->data, dev->mtu,
				PCI_DMA_FROMDEVICE);
	rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
	rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);

	return SUCCESS;
}

/**
 * fill_rx_buffers - Allocates the Rx side skbs
 * @nic: device private variable
 * @ring_no: ring number
 * Description:
 * The function allocates Rx side skbs and puts the physical
 * address of these buffers into the RxD buffer pointers, so that the NIC
 * can DMA the received frame into these locations.
 * The NIC supports 3 receive modes, viz
 * 1. single buffer,
 * 2. three buffer and
 * 3. five buffer modes.
 * Each mode defines how many fragments the received frame will be split
 * up into by the NIC. The frame is split into L3 header, L4 header and
 * L4 payload in three buffer mode, while in 5 buffer mode the L4 payload
 * itself is split into 3 fragments. As of now only single buffer mode is
 * supported.
 * Return Value:
 * SUCCESS on success or an appropriate -ve value on failure.
 */
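/*
 * For reference, the skb size computed below for 2 buffer mode works out
 * to dev->mtu + ALIGN_SIZE + BUF0_LEN + 4: room to realign skb->data to
 * a 128-byte boundary, the reserved Buffer-0 header area, and the extra
 * 4 bytes mapped along with the payload.
 */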
static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
{
	struct net_device *dev = nic->dev;
	struct sk_buff *skb;
	RxD_t *rxdp;
	int off, off1, size, block_no, block_no1;
	u32 alloc_tab = 0;
	u32 alloc_cnt;
	mac_info_t *mac_control;
	struct config_param *config;
	u64 tmp;
	buffAdd_t *ba;
	unsigned long flags;
	RxD_t *first_rxdp = NULL;

	mac_control = &nic->mac_control;
	config = &nic->config;
	alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
	    atomic_read(&nic->rx_bufs_left[ring_no]);

	block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
	off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
	while (alloc_tab < alloc_cnt) {
		block_no = mac_control->rings[ring_no].rx_curr_put_info.
		    block_index;
		off = mac_control->rings[ring_no].rx_curr_put_info.offset;

		rxdp = mac_control->rings[ring_no].
		    rx_blocks[block_no].rxds[off].virt_addr;

		if ((block_no == block_no1) && (off == off1) &&
		    (rxdp->Host_Control)) {
			DBG_PRINT(INTR_DBG, "%s: Get and Put",
				  dev->name);
			DBG_PRINT(INTR_DBG, " info equated\n");
			goto end;
		}
		if (off && (off == rxd_count[nic->rxd_mode])) {
			mac_control->rings[ring_no].rx_curr_put_info.
			    block_index++;
			if (mac_control->rings[ring_no].rx_curr_put_info.
			    block_index == mac_control->rings[ring_no].
			    block_count)
				mac_control->rings[ring_no].rx_curr_put_info.
				    block_index = 0;
			block_no = mac_control->rings[ring_no].
			    rx_curr_put_info.block_index;
			if (off == rxd_count[nic->rxd_mode])
				off = 0;
			mac_control->rings[ring_no].rx_curr_put_info.
			    offset = off;
			rxdp = mac_control->rings[ring_no].
			    rx_blocks[block_no].block_virt_addr;
			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
				  dev->name, rxdp);
		}
		if (!napi) {
			spin_lock_irqsave(&nic->put_lock, flags);
			mac_control->rings[ring_no].put_pos =
			    (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
			spin_unlock_irqrestore(&nic->put_lock, flags);
		} else {
			mac_control->rings[ring_no].put_pos =
			    (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
		}
		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
		    ((nic->rxd_mode >= RXD_MODE_3A) &&
		     (rxdp->Control_2 & BIT(0)))) {
			mac_control->rings[ring_no].rx_curr_put_info.
			    offset = off;
			goto end;
		}
		/* calculate size of skb based on ring mode */
		size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
		    HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
		if (nic->rxd_mode == RXD_MODE_1)
			size += NET_IP_ALIGN;
		else if (nic->rxd_mode == RXD_MODE_3B)
			size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
		else
			size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;
		/* allocate skb */
		skb = dev_alloc_skb(size);
		if (!skb) {
			DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
			DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			return -ENOMEM;
		}
		if (nic->rxd_mode == RXD_MODE_1) {
			/* 1 buffer mode - normal operation mode */
			memset(rxdp, 0, sizeof(RxD1_t));
			skb_reserve(skb, NET_IP_ALIGN);
			((RxD1_t*)rxdp)->Buffer0_ptr = pci_map_single
			    (nic->pdev, skb->data, size - NET_IP_ALIGN,
			     PCI_DMA_FROMDEVICE);
			rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);

		} else if (nic->rxd_mode >= RXD_MODE_3A) {
			/*
			 * 2 or 3 buffer mode -
			 * Both 2 buffer mode and 3 buffer mode provide 128
			 * byte aligned receive buffers.
			 *
			 * 3 buffer mode provides header separation, wherein
			 * skb->data will have the L3/L4 headers whereas
			 * skb_shinfo(skb)->frag_list will have the L4 data
			 * payload.
			 */

			memset(rxdp, 0, sizeof(RxD3_t));
			ba = &mac_control->rings[ring_no].ba[block_no][off];
			skb_reserve(skb, BUF0_LEN);
			tmp = (u64)(unsigned long) skb->data;
			tmp += ALIGN_SIZE;
			tmp &= ~ALIGN_SIZE;
			skb->data = (void *) (unsigned long)tmp;
			skb->tail = (void *) (unsigned long)tmp;

			if (!(((RxD3_t*)rxdp)->Buffer0_ptr))
				((RxD3_t*)rxdp)->Buffer0_ptr =
				    pci_map_single(nic->pdev, ba->ba_0,
					BUF0_LEN, PCI_DMA_FROMDEVICE);
			else
				pci_dma_sync_single_for_device(nic->pdev,
				    (dma_addr_t) ((RxD3_t*)rxdp)->Buffer0_ptr,
				    BUF0_LEN, PCI_DMA_FROMDEVICE);
			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
			if (nic->rxd_mode == RXD_MODE_3B) {
				/* Two buffer mode */

				/*
				 * Buffer2 will have L3/L4 header plus
				 * L4 payload
				 */
				((RxD3_t*)rxdp)->Buffer2_ptr = pci_map_single
				    (nic->pdev, skb->data, dev->mtu + 4,
				     PCI_DMA_FROMDEVICE);

				/* Buffer-1 will be a dummy buffer. Not used */
				if (!(((RxD3_t*)rxdp)->Buffer1_ptr)) {
					((RxD3_t*)rxdp)->Buffer1_ptr =
					    pci_map_single(nic->pdev,
						ba->ba_1, BUF1_LEN,
						PCI_DMA_FROMDEVICE);
				}
				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
				    (dev->mtu + 4);
			} else {
				/* 3 buffer mode */
				if (fill_rxd_3buf(nic, rxdp, skb) == -ENOMEM) {
					dev_kfree_skb_irq(skb);
					if (first_rxdp) {
						wmb();
						first_rxdp->Control_1 |=
						    RXD_OWN_XENA;
					}
					return -ENOMEM;
				}
			}
			rxdp->Control_2 |= BIT(0);
		}
		rxdp->Host_Control = (unsigned long) (skb);
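		/*
		 * Descriptor ownership is released in batches of
		 * 2^rxsync_frequency: every descriptor inside a batch gets
		 * RXD_OWN_XENA immediately, while the batch leader
		 * (first_rxdp) is held back and released further down,
		 * after a write barrier, once the whole batch is set up.
		 */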
		if (alloc_tab & ((1 << rxsync_frequency) - 1))
			rxdp->Control_1 |= RXD_OWN_XENA;
		off++;
		if (off == (rxd_count[nic->rxd_mode] + 1))
			off = 0;
		mac_control->rings[ring_no].rx_curr_put_info.offset = off;

		rxdp->Control_2 |= SET_RXD_MARKER;
		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			first_rxdp = rxdp;
		}
		atomic_inc(&nic->rx_bufs_left[ring_no]);
		alloc_tab++;
	}

end:
	/* Transfer ownership of first descriptor to adapter just before
	 * exiting. Before that, use memory barrier so that ownership
	 * and other fields are seen by adapter correctly.
	 */
	if (first_rxdp) {
		wmb();
		first_rxdp->Control_1 |= RXD_OWN_XENA;
	}

	return SUCCESS;
}

static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
{
	struct net_device *dev = sp->dev;
	int j;
	struct sk_buff *skb;
	RxD_t *rxdp;
	mac_info_t *mac_control;
	buffAdd_t *ba;

	mac_control = &sp->mac_control;
	for (j = 0; j < rxd_count[sp->rxd_mode]; j++) {
		rxdp = mac_control->rings[ring_no].
		    rx_blocks[blk].rxds[j].virt_addr;
		skb = (struct sk_buff *)
		    ((unsigned long) rxdp->Host_Control);
		if (!skb)
			continue;
		if (sp->rxd_mode == RXD_MODE_1) {
			pci_unmap_single(sp->pdev, (dma_addr_t)
			    ((RxD1_t*)rxdp)->Buffer0_ptr,
			    dev->mtu +
			    HEADER_ETHERNET_II_802_3_SIZE
			    + HEADER_802_2_SIZE +
			    HEADER_SNAP_SIZE,
			    PCI_DMA_FROMDEVICE);
			memset(rxdp, 0, sizeof(RxD1_t));
		} else if (sp->rxd_mode == RXD_MODE_3B) {
			ba = &mac_control->rings[ring_no].
			    ba[blk][j];
			pci_unmap_single(sp->pdev, (dma_addr_t)
			    ((RxD3_t*)rxdp)->Buffer0_ptr,
			    BUF0_LEN,
			    PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev, (dma_addr_t)
			    ((RxD3_t*)rxdp)->Buffer1_ptr,
			    BUF1_LEN,
			    PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev, (dma_addr_t)
			    ((RxD3_t*)rxdp)->Buffer2_ptr,
			    dev->mtu + 4,
			    PCI_DMA_FROMDEVICE);
			memset(rxdp, 0, sizeof(RxD3_t));
		} else {
			/* 3 buffer mode */
			pci_unmap_single(sp->pdev, (dma_addr_t)
			    ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN,
			    PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev, (dma_addr_t)
			    ((RxD3_t*)rxdp)->Buffer1_ptr,
			    l3l4hdr_size + 4,
			    PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev, (dma_addr_t)
			    ((RxD3_t*)rxdp)->Buffer2_ptr, dev->mtu,
			    PCI_DMA_FROMDEVICE);
			memset(rxdp, 0, sizeof(RxD3_t));
		}
		dev_kfree_skb(skb);
		atomic_dec(&sp->rx_bufs_left[ring_no]);
	}
}

/**
 * free_rx_buffers - Frees all Rx buffers
 * @sp: device private variable.
 * Description:
 * This function will free all Rx buffers allocated by host.
 * Return Value:
 * NONE.
 */

static void free_rx_buffers(struct s2io_nic *sp)
{
	struct net_device *dev = sp->dev;
	int i, blk = 0, buf_cnt = 0;
	mac_info_t *mac_control;
	struct config_param *config;

	mac_control = &sp->mac_control;
	config = &sp->config;

	for (i = 0; i < config->rx_ring_num; i++) {
		for (blk = 0; blk < rx_ring_sz[i]; blk++)
			free_rxd_blk(sp, i, blk);

		mac_control->rings[i].rx_curr_put_info.block_index = 0;
		mac_control->rings[i].rx_curr_get_info.block_index = 0;
		mac_control->rings[i].rx_curr_put_info.offset = 0;
		mac_control->rings[i].rx_curr_get_info.offset = 0;
		atomic_set(&sp->rx_bufs_left[i], 0);
		DBG_PRINT(INIT_DBG, "%s: Freed 0x%x Rx Buffers on ring%d\n",
			  dev->name, buf_cnt, i);
	}
}

/**
 * s2io_poll - Rx interrupt handler for NAPI support
 * @dev : pointer to the device structure.
 * @budget : The number of packets that were budgeted to be processed
 * during one pass through the 'Poll' function.
 * Description:
 * Comes into picture only if NAPI support has been incorporated. It does
 * the same thing that rx_intr_handler does, but not in an interrupt
 * context; also it will process only a given number of packets.
 * Return value:
 * 0 on success and 1 if there are no Rx packets to be processed.
 */

static int s2io_poll(struct net_device *dev, int *budget)
{
	nic_t *nic = dev->priv;
	int pkt_cnt = 0, org_pkts_to_process;
	mac_info_t *mac_control;
	struct config_param *config;
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
	int i;

	atomic_inc(&nic->isr_cnt);
	mac_control = &nic->mac_control;
	config = &nic->config;

	nic->pkts_to_process = *budget;
	if (nic->pkts_to_process > dev->quota)
		nic->pkts_to_process = dev->quota;
	org_pkts_to_process = nic->pkts_to_process;
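	/*
	 * Under the 2.6 NAPI model both *budget and dev->quota cap the
	 * work a single poll may do, so the smaller of the two is used
	 * and both are decremented by the packets actually processed.
	 */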
	writeq(val64, &bar0->rx_traffic_int);
	val64 = readl(&bar0->rx_traffic_int);
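	/* Writing all 1s clears the pending Rx traffic interrupt bits; the
	 * readl flushes the posted write before ring processing begins. */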
	for (i = 0; i < config->rx_ring_num; i++) {
		rx_intr_handler(&mac_control->rings[i]);
		pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
		if (!nic->pkts_to_process) {
			/* Quota for the current iteration has been met */
			goto no_rx;
		}
	}
	if (!pkt_cnt)
		pkt_cnt = 1;

	dev->quota -= pkt_cnt;
	*budget -= pkt_cnt;
	netif_rx_complete(dev);

	for (i = 0; i < config->rx_ring_num; i++) {
		if (fill_rx_buffers(nic, i) == -ENOMEM) {
			DBG_PRINT(ERR_DBG, "%s: Out of memory", dev->name);
			DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
			break;
		}
	}
	/* Re-enable the Rx interrupts. */
	writeq(0x0, &bar0->rx_traffic_mask);
	val64 = readl(&bar0->rx_traffic_mask);
	atomic_dec(&nic->isr_cnt);
	return 0;

no_rx:
	dev->quota -= pkt_cnt;
	*budget -= pkt_cnt;

	for (i = 0; i < config->rx_ring_num; i++) {
		if (fill_rx_buffers(nic, i) == -ENOMEM) {
			DBG_PRINT(ERR_DBG, "%s: Out of memory", dev->name);
			DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
			break;
		}
	}
	atomic_dec(&nic->isr_cnt);
	return 1;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * s2io_netpoll - netpoll event handler entry point
 * @dev : pointer to the device structure.
 * Description:
 * This function will be called by the upper layer to check for events on
 * the interface in situations where interrupts are disabled. It is used
 * for specific in-kernel networking tasks, such as remote consoles and
 * kernel debugging over the network (e.g., netdump in Red Hat).
 */
static void s2io_netpoll(struct net_device *dev)
{
	nic_t *nic = dev->priv;
	mac_info_t *mac_control;
	struct config_param *config;
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
	int i;

	disable_irq(dev->irq);

	atomic_inc(&nic->isr_cnt);
	mac_control = &nic->mac_control;
	config = &nic->config;

	writeq(val64, &bar0->rx_traffic_int);
	writeq(val64, &bar0->tx_traffic_int);

	/*
	 * We need to free up the transmitted skbufs or else netpoll will
	 * run out of skbs and will fail, and eventually netpoll
	 * applications such as netdump will fail.
	 */
	for (i = 0; i < config->tx_fifo_num; i++)
		tx_intr_handler(&mac_control->fifos[i]);

	/* check for received packet and indicate up to network */
	for (i = 0; i < config->rx_ring_num; i++)
		rx_intr_handler(&mac_control->rings[i]);

	for (i = 0; i < config->rx_ring_num; i++) {
		if (fill_rx_buffers(nic, i) == -ENOMEM) {
			DBG_PRINT(ERR_DBG, "%s: Out of memory", dev->name);
			DBG_PRINT(ERR_DBG, " in Rx Netpoll!!\n");
			break;
		}
	}
	atomic_dec(&nic->isr_cnt);
	enable_irq(dev->irq);
	return;
}
#endif

/**
 * rx_intr_handler - Rx interrupt handler
 * @nic: device private variable.
 * Description:
 * If the interrupt is because of a received frame, or if the
 * receive ring contains fresh, as yet unprocessed frames, this function
 * is called. It picks out the RxD at which place the last Rx processing
 * had stopped, sends the skb to the OSM's Rx handler and then increments
 * the offset.
 * Return Value:
 * NONE.
 */
static void rx_intr_handler(ring_info_t *ring_data)
{
	nic_t *nic = ring_data->nic;
	struct net_device *dev = (struct net_device *) nic->dev;
	int get_block, put_block, put_offset;
	rx_curr_get_info_t get_info, put_info;
	RxD_t *rxdp;
	struct sk_buff *skb;
	int pkt_cnt = 0;
	int i;

	spin_lock(&nic->rx_lock);
	if (atomic_read(&nic->card_state) == CARD_DOWN) {
		DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n",
			  __FUNCTION__, dev->name);
		spin_unlock(&nic->rx_lock);
		return;
	}

	get_info = ring_data->rx_curr_get_info;
	get_block = get_info.block_index;
	put_info = ring_data->rx_curr_put_info;
	put_block = put_info.block_index;
	rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
	if (!napi) {
		spin_lock(&nic->put_lock);
		put_offset = ring_data->put_pos;
		spin_unlock(&nic->put_lock);
	} else
		put_offset = ring_data->put_pos;
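	/*
	 * put_pos is advanced by fill_rx_buffers(), which outside NAPI may
	 * run concurrently from another context; hence the put_lock there.
	 */
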
	while (RXD_IS_UP2DT(rxdp)) {
		/*
		 * If we are next to the put index then it's a
		 * ring full condition.
		 */
		if ((get_block == put_block) &&
		    (get_info.offset + 1) == put_info.offset) {
			DBG_PRINT(INTR_DBG, "%s: Ring Full\n", dev->name);
			break;
		}
		skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
		if (skb == NULL) {
			DBG_PRINT(ERR_DBG, "%s: The skb is ",
				  dev->name);
			DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
			spin_unlock(&nic->rx_lock);
			return;
		}
		if (nic->rxd_mode == RXD_MODE_1) {
			pci_unmap_single(nic->pdev, (dma_addr_t)
			    ((RxD1_t*)rxdp)->Buffer0_ptr,
			    dev->mtu +
			    HEADER_ETHERNET_II_802_3_SIZE +
			    HEADER_802_2_SIZE +
			    HEADER_SNAP_SIZE,
			    PCI_DMA_FROMDEVICE);
		} else if (nic->rxd_mode == RXD_MODE_3B) {
			pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
			    ((RxD3_t*)rxdp)->Buffer0_ptr,
			    BUF0_LEN, PCI_DMA_FROMDEVICE);
			pci_unmap_single(nic->pdev, (dma_addr_t)
			    ((RxD3_t*)rxdp)->Buffer2_ptr,
			    dev->mtu + 4,
			    PCI_DMA_FROMDEVICE);
		} else {
			pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
			    ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN,
			    PCI_DMA_FROMDEVICE);
			pci_unmap_single(nic->pdev, (dma_addr_t)
			    ((RxD3_t*)rxdp)->Buffer1_ptr,
			    l3l4hdr_size + 4,
			    PCI_DMA_FROMDEVICE);
			pci_unmap_single(nic->pdev, (dma_addr_t)
			    ((RxD3_t*)rxdp)->Buffer2_ptr,
			    dev->mtu, PCI_DMA_FROMDEVICE);
		}
		prefetch(skb->data);
		rx_osm_handler(ring_data, rxdp);
		get_info.offset++;
		ring_data->rx_curr_get_info.offset = get_info.offset;
		rxdp = ring_data->rx_blocks[get_block].
		    rxds[get_info.offset].virt_addr;
		if (get_info.offset == rxd_count[nic->rxd_mode]) {
			get_info.offset = 0;
			ring_data->rx_curr_get_info.offset = get_info.offset;
			get_block++;
			if (get_block == ring_data->block_count)
				get_block = 0;
			ring_data->rx_curr_get_info.block_index = get_block;
			rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
		}

		nic->pkts_to_process -= 1;
		if ((napi) && (!nic->pkts_to_process))
			break;
		pkt_cnt++;
		if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
			break;
	}
	if (nic->lro) {
		/* Clear all LRO sessions before exiting */
		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
			lro_t *lro = &nic->lro0_n[i];
			if (lro->in_use) {
				update_L3L4_header(nic, lro);
				queue_rx_frame(lro->parent);
				clear_lro_session(lro);
			}
		}
	}

	spin_unlock(&nic->rx_lock);
}

/**
 * tx_intr_handler - Transmit interrupt handler
 * @nic : device private variable
 * Description:
 * If an interrupt was raised to indicate DMA complete of the
 * Tx packet, this function is called. It identifies the last TxD
 * whose buffer was freed and frees all skbs whose data have already
 * been DMA'ed into the NIC's internal memory.
 * Return Value:
 * NONE
 */

static void tx_intr_handler(fifo_info_t *fifo_data)
{
	nic_t *nic = fifo_data->nic;
	struct net_device *dev = (struct net_device *) nic->dev;
	tx_curr_get_info_t get_info, put_info;
	struct sk_buff *skb;
	TxD_t *txdlp;

	get_info = fifo_data->tx_curr_get_info;
	put_info = fifo_data->tx_curr_put_info;
	txdlp = (TxD_t *) fifo_data->list_info[get_info.offset].
	    list_virt_addr;
	while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
	       (get_info.offset != put_info.offset) &&
	       (txdlp->Host_Control)) {
		/* Check for TxD errors */
		if (txdlp->Control_1 & TXD_T_CODE) {
			unsigned long long err;
			err = txdlp->Control_1 & TXD_T_CODE;
			if (err & 0x1)
				nic->mac_control.stats_info->sw_stat.
				    parity_err_cnt++;
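			/*
			 * The transfer code occupies bits 51:48 of
			 * Control_1; 0xA there is reported when the frame
			 * was aborted by loss of link.
			 */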
			if ((err >> 48) == 0xA) {
				DBG_PRINT(TX_DBG, "TxD returned due "
					  "to loss of link\n");
			} else {
				DBG_PRINT(ERR_DBG, "***TxD error %llx\n",
					  err);
			}
		}

		skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
		if (skb == NULL) {
			DBG_PRINT(ERR_DBG, "%s: Null skb ",
				  __FUNCTION__);
			DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
			return;
		}

		/* Updating the statistics block */
		nic->stats.tx_bytes += skb->len;
		dev_kfree_skb_irq(skb);

		get_info.offset++;
		if (get_info.offset == get_info.fifo_len + 1)
			get_info.offset = 0;
		txdlp = (TxD_t *) fifo_data->list_info
		    [get_info.offset].list_virt_addr;
		fifo_data->tx_curr_get_info.offset =
		    get_info.offset;
	}

	spin_lock(&nic->tx_lock);
	if (netif_queue_stopped(dev))
		netif_wake_queue(dev);
	spin_unlock(&nic->tx_lock);
}

/**
 * s2io_mdio_write - Function to write into the MDIO registers
 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
 * @addr : address value
 * @value : data value
 * @dev : pointer to net_device structure
 * Description:
 * This function is used to write values to the MDIO registers.
 * Return value:
 * NONE
 */
static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value,
			    struct net_device *dev)
{
	u64 val64 = 0x0;
	nic_t *sp = dev->priv;
	XENA_dev_config_t __iomem *bar0 = sp->bar0;
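	/*
	 * MDIO clause 45 style access is two-phase: an address transaction
	 * first latches the register address, then a separate data
	 * transaction performs the actual write; the trailing read
	 * transaction appears to serve as a verification/flush step.
	 */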
	/* address transaction */
	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
	    | MDIO_MMD_DEV_ADDR(mmd_type)
	    | MDIO_MMS_PRT_ADDR(0x0);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* Data transaction */
	val64 = 0x0;
	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
	    | MDIO_MMD_DEV_ADDR(mmd_type)
	    | MDIO_MMS_PRT_ADDR(0x0)
	    | MDIO_MDIO_DATA(value)
	    | MDIO_OP(MDIO_OP_WRITE_TRANS);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	val64 = 0x0;
	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
	    | MDIO_MMD_DEV_ADDR(mmd_type)
	    | MDIO_MMS_PRT_ADDR(0x0)
	    | MDIO_OP(MDIO_OP_READ_TRANS);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);
}

/**
 * s2io_mdio_read - Function to read from the MDIO registers
 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
 * @addr : address value
 * @dev : pointer to net_device structure
 * Description:
 * This function is used to read values from the MDIO registers.
 * Return value:
 * The value read from the MDIO register.
 */
static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
{
	u64 val64 = 0x0;
	u64 rval64 = 0x0;
	nic_t *sp = dev->priv;
	XENA_dev_config_t __iomem *bar0 = sp->bar0;

	/* address transaction */
	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
	    | MDIO_MMD_DEV_ADDR(mmd_type)
	    | MDIO_MMS_PRT_ADDR(0x0);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* Data transaction */
	val64 = 0x0;
	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
	    | MDIO_MMD_DEV_ADDR(mmd_type)
	    | MDIO_MMS_PRT_ADDR(0x0)
	    | MDIO_OP(MDIO_OP_READ_TRANS);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* Read the value from regs */
	rval64 = readq(&bar0->mdio_control);
	rval64 = rval64 & 0xFFFF0000;
	rval64 = rval64 >> 16;
	return rval64;
}

/**
 * s2io_chk_xpak_counter - Function to check the status of the xpak counters
 * @counter : counter value to be updated
 * @regs_stat : per-alarm 2-bit status word kept in xpak_regs_stat
 * @index : index of this counter's 2-bit field within regs_stat
 * @flag : flag to indicate the status
 * @type : counter type
 * Description:
 * This function checks the status of the xpak counters value
 * Return value:
 * NONE
 */
static void s2io_chk_xpak_counter(u64 *counter, u64 *regs_stat, u32 index,
				  u16 flag, u16 type)
{
	u64 mask = 0x3;
	u64 val64;
	int i;

	for (i = 0; i < index; i++)
		mask = mask << 0x2;
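	/*
	 * xpak_regs_stat packs a 2-bit saturating event count per alarm;
	 * after the shifts above, mask selects the pair of bits belonging
	 * to this counter's index.
	 */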
	if (flag > 0) {
		*counter = *counter + 1;
		val64 = *regs_stat & mask;
		val64 = val64 >> (index * 0x2);
		val64 = val64 + 1;
		if (val64 == 3) {
			switch (type) {
			case 1:
				DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
					  "service. Excessive temperatures may "
					  "result in premature transceiver "
					  "failure.\n");
				break;
			case 2:
				DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
					  "service. Excessive bias currents "
					  "may indicate imminent laser diode "
					  "failure.\n");
				break;
			case 3:
				DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
					  "service. Excessive laser output "
					  "power may saturate far-end "
					  "receiver.\n");
				break;
			default:
				DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
					  "type.\n");
			}
			val64 = 0x0;
		}
		val64 = val64 << (index * 0x2);
		*regs_stat = (*regs_stat & (~mask)) | (val64);
	} else {
		*regs_stat = *regs_stat & (~mask);
	}
}

/**
 * s2io_updt_xpak_counter - Function to update the xpak counters
 * @dev : pointer to net_device struct
 * Description:
 * This function is to update the status of the xpak counters value
 * Return value:
 * NONE
 */
static void s2io_updt_xpak_counter(struct net_device *dev)
{
	u16 flag = 0x0;
	u16 type = 0x0;
	u16 val16 = 0x0;
	u64 val64 = 0x0;
	u64 addr = 0x0;
	nic_t *sp = dev->priv;
	StatInfo_t *stat_info = sp->mac_control.stats_info;

	/* Check the communication with the MDIO slave */
	addr = 0x0000;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
	if ((val64 == 0xFFFF) || (val64 == 0x0000)) {
		DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
			  "Returned %llx\n", (unsigned long long)val64);
		return;
	}

	/* Check for the expected value of 0x2040 at PMA address 0x0000 */
	if (val64 != 0x2040) {
		DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
		DBG_PRINT(ERR_DBG, "Returned: %llx - Expected: 0x2040\n",
			  (unsigned long long)val64);
		return;
	}

	/* Loading the DOM register to MDIO register */
	addr = 0xA100;
	s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	/* Reading the Alarm flags */
	addr = 0xA070;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

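	/*
	 * Alarm bits decoded below: bit 7/6 - transceiver temperature
	 * high/low, bit 3/2 - laser bias current high/low, bit 1/0 -
	 * laser output power high/low.
	 */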
	flag = CHECKBIT(val64, 0x7);
	type = 1;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
			      &stat_info->xpak_stat.xpak_regs_stat,
			      0x0, flag, type);

	if (CHECKBIT(val64, 0x6))
		stat_info->xpak_stat.alarm_transceiver_temp_low++;

	flag = CHECKBIT(val64, 0x3);
	type = 2;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
			      &stat_info->xpak_stat.xpak_regs_stat,
			      0x2, flag, type);

	if (CHECKBIT(val64, 0x2))
		stat_info->xpak_stat.alarm_laser_bias_current_low++;

	flag = CHECKBIT(val64, 0x1);
	type = 3;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,