/* drivers/net/s2io.c (from linux-3.10.git) */
1 /************************************************************************
2  * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3  * Copyright(c) 2002-2005 Neterion Inc.
4
5  * This software may be used and distributed according to the terms of
6  * the GNU General Public License (GPL), incorporated herein by reference.
7  * Drivers based on or derived from this code fall under the GPL and must
8  * retain the authorship, copyright and license notice.  This file is not
9  * a complete program and may only be used when the entire operating
10  * system is licensed under the GPL.
11  * See the file COPYING in this distribution for more information.
12  *
13  * Credits:
14  * Jeff Garzik          : For pointing out the improper error condition
15  *                        check in the s2io_xmit routine and also some
16  *                        issues in the Tx watch dog function. Also for
17  *                        patiently answering all those innumerable
 *                        questions regarding the 2.6 porting issues.
19  * Stephen Hemminger    : Providing proper 2.6 porting mechanism for some
20  *                        macros available only in 2.6 Kernel.
21  * Francois Romieu      : For pointing out all code part that were
22  *                        deprecated and also styling related comments.
23  * Grant Grundler       : For helping me get rid of some Architecture
24  *                        dependent code.
25  * Christopher Hellwig  : Some more 2.6 specific issues in the driver.
26  *
27  * The module loadable parameters that are supported by the driver and a brief
 * explanation of all the variables.
29  *
30  * rx_ring_num : This can be used to program the number of receive rings used
31  * in the driver.
32  * rx_ring_sz: This defines the number of receive blocks each ring can have.
33  *     This is also an array of size 8.
34  * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35  *              values are 1, 2 and 3.
 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
37  * tx_fifo_len: This too is an array of 8. Each element defines the number of
38  * Tx descriptors that can be associated with each corresponding FIFO.
39  * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40  *     1(MSI), 2(MSI_X). Default value is '0(INTA)'
41  * lro: Specifies whether to enable Large Receive Offload (LRO) or not.
42  *     Possible values '1' for enable '0' for disable. Default is '0'
43  * lro_max_pkts: This parameter defines maximum number of packets can be
44  *     aggregated as a single large packet
45  ************************************************************************/
46
47 #include <linux/config.h>
48 #include <linux/module.h>
49 #include <linux/types.h>
50 #include <linux/errno.h>
51 #include <linux/ioport.h>
52 #include <linux/pci.h>
53 #include <linux/dma-mapping.h>
54 #include <linux/kernel.h>
55 #include <linux/netdevice.h>
56 #include <linux/etherdevice.h>
57 #include <linux/skbuff.h>
58 #include <linux/init.h>
59 #include <linux/delay.h>
60 #include <linux/stddef.h>
61 #include <linux/ioctl.h>
62 #include <linux/timex.h>
63 #include <linux/sched.h>
64 #include <linux/ethtool.h>
65 #include <linux/workqueue.h>
66 #include <linux/if_vlan.h>
67 #include <linux/ip.h>
68 #include <linux/tcp.h>
69 #include <net/tcp.h>
70
71 #include <asm/system.h>
72 #include <asm/uaccess.h>
73 #include <asm/io.h>
74 #include <asm/div64.h>
75
76 /* local include */
77 #include "s2io.h"
78 #include "s2io-regs.h"
79
80 #define DRV_VERSION "2.0.11.2"
81
82 /* S2io Driver name & version. */
83 static char s2io_driver_name[] = "Neterion";
84 static char s2io_driver_version[] = DRV_VERSION;
85
86 static int rxd_size[4] = {32,48,48,64};
87 static int rxd_count[4] = {127,85,85,63};
88
89 static inline int RXD_IS_UP2DT(RxD_t *rxdp)
90 {
91         int ret;
92
93         ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
94                 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
95
96         return ret;
97 }
98
99 /*
100  * Cards with following subsystem_id have a link state indication
101  * problem, 600B, 600C, 600D, 640B, 640C and 640D.
102  * macro below identifies these cards given the subsystem_id.
103  */
104 #define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
105         (dev_type == XFRAME_I_DEVICE) ?                 \
106                 ((((subid >= 0x600B) && (subid <= 0x600D)) || \
107                  ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
108
109 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
110                                       ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
111 #define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
112 #define PANIC   1
113 #define LOW     2
114 static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
115 {
116         mac_info_t *mac_control;
117
118         mac_control = &sp->mac_control;
119         if (rxb_size <= rxd_count[sp->rxd_mode])
120                 return PANIC;
121         else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16)
122                 return  LOW;
123         return 0;
124 }
125
126 /* Ethtool related variables and Macros. */
127 static char s2io_gstrings[][ETH_GSTRING_LEN] = {
128         "Register test\t(offline)",
129         "Eeprom test\t(offline)",
130         "Link test\t(online)",
131         "RLDRAM test\t(offline)",
132         "BIST Test\t(offline)"
133 };
134
135 static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
136         {"tmac_frms"},
137         {"tmac_data_octets"},
138         {"tmac_drop_frms"},
139         {"tmac_mcst_frms"},
140         {"tmac_bcst_frms"},
141         {"tmac_pause_ctrl_frms"},
142         {"tmac_any_err_frms"},
143         {"tmac_vld_ip_octets"},
144         {"tmac_vld_ip"},
145         {"tmac_drop_ip"},
146         {"tmac_icmp"},
147         {"tmac_rst_tcp"},
148         {"tmac_tcp"},
149         {"tmac_udp"},
150         {"rmac_vld_frms"},
151         {"rmac_data_octets"},
152         {"rmac_fcs_err_frms"},
153         {"rmac_drop_frms"},
154         {"rmac_vld_mcst_frms"},
155         {"rmac_vld_bcst_frms"},
156         {"rmac_in_rng_len_err_frms"},
157         {"rmac_long_frms"},
158         {"rmac_pause_ctrl_frms"},
159         {"rmac_discarded_frms"},
160         {"rmac_usized_frms"},
161         {"rmac_osized_frms"},
162         {"rmac_frag_frms"},
163         {"rmac_jabber_frms"},
164         {"rmac_ip"},
165         {"rmac_ip_octets"},
166         {"rmac_hdr_err_ip"},
167         {"rmac_drop_ip"},
168         {"rmac_icmp"},
169         {"rmac_tcp"},
170         {"rmac_udp"},
171         {"rmac_err_drp_udp"},
172         {"rmac_pause_cnt"},
173         {"rmac_accepted_ip"},
174         {"rmac_err_tcp"},
175         {"\n DRIVER STATISTICS"},
176         {"single_bit_ecc_errs"},
177         {"double_bit_ecc_errs"},
178         ("lro_aggregated_pkts"),
179         ("lro_flush_both_count"),
180         ("lro_out_of_sequence_pkts"),
181         ("lro_flush_due_to_max_pkts"),
182         ("lro_avg_aggr_pkts"),
183 };
184
185 #define S2IO_STAT_LEN sizeof(ethtool_stats_keys)/ ETH_GSTRING_LEN
186 #define S2IO_STAT_STRINGS_LEN S2IO_STAT_LEN * ETH_GSTRING_LEN
187
188 #define S2IO_TEST_LEN   sizeof(s2io_gstrings) / ETH_GSTRING_LEN
189 #define S2IO_STRINGS_LEN        S2IO_TEST_LEN * ETH_GSTRING_LEN
190
191 #define S2IO_TIMER_CONF(timer, handle, arg, exp)                \
192                         init_timer(&timer);                     \
193                         timer.function = handle;                \
194                         timer.data = (unsigned long) arg;       \
195                         mod_timer(&timer, (jiffies + exp))      \
196
197 /* Add the vlan */
198 static void s2io_vlan_rx_register(struct net_device *dev,
199                                         struct vlan_group *grp)
200 {
201         nic_t *nic = dev->priv;
202         unsigned long flags;
203
204         spin_lock_irqsave(&nic->tx_lock, flags);
205         nic->vlgrp = grp;
206         spin_unlock_irqrestore(&nic->tx_lock, flags);
207 }
208
209 /* Unregister the vlan */
210 static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
211 {
212         nic_t *nic = dev->priv;
213         unsigned long flags;
214
215         spin_lock_irqsave(&nic->tx_lock, flags);
216         if (nic->vlgrp)
217                 nic->vlgrp->vlan_devices[vid] = NULL;
218         spin_unlock_irqrestore(&nic->tx_lock, flags);
219 }
220
221 /*
222  * Constants to be programmed into the Xena's registers, to configure
223  * the XAUI.
224  */
225
226 #define SWITCH_SIGN     0xA5A5A5A5A5A5A5A5ULL
227 #define END_SIGN        0x0
228
229 static const u64 herc_act_dtx_cfg[] = {
230         /* Set address */
231         0x8000051536750000ULL, 0x80000515367500E0ULL,
232         /* Write data */
233         0x8000051536750004ULL, 0x80000515367500E4ULL,
234         /* Set address */
235         0x80010515003F0000ULL, 0x80010515003F00E0ULL,
236         /* Write data */
237         0x80010515003F0004ULL, 0x80010515003F00E4ULL,
238         /* Set address */
239         0x801205150D440000ULL, 0x801205150D4400E0ULL,
240         /* Write data */
241         0x801205150D440004ULL, 0x801205150D4400E4ULL,
242         /* Set address */
243         0x80020515F2100000ULL, 0x80020515F21000E0ULL,
244         /* Write data */
245         0x80020515F2100004ULL, 0x80020515F21000E4ULL,
246         /* Done */
247         END_SIGN
248 };
249
250 static const u64 xena_mdio_cfg[] = {
251         /* Reset PMA PLL */
252         0xC001010000000000ULL, 0xC0010100000000E0ULL,
253         0xC0010100008000E4ULL,
254         /* Remove Reset from PMA PLL */
255         0xC001010000000000ULL, 0xC0010100000000E0ULL,
256         0xC0010100000000E4ULL,
257         END_SIGN
258 };
259
260 static const u64 xena_dtx_cfg[] = {
261         0x8000051500000000ULL, 0x80000515000000E0ULL,
262         0x80000515D93500E4ULL, 0x8001051500000000ULL,
263         0x80010515000000E0ULL, 0x80010515001E00E4ULL,
264         0x8002051500000000ULL, 0x80020515000000E0ULL,
265         0x80020515F21000E4ULL,
266         /* Set PADLOOPBACKN */
267         0x8002051500000000ULL, 0x80020515000000E0ULL,
268         0x80020515B20000E4ULL, 0x8003051500000000ULL,
269         0x80030515000000E0ULL, 0x80030515B20000E4ULL,
270         0x8004051500000000ULL, 0x80040515000000E0ULL,
271         0x80040515B20000E4ULL, 0x8005051500000000ULL,
272         0x80050515000000E0ULL, 0x80050515B20000E4ULL,
273         SWITCH_SIGN,
274         /* Remove PADLOOPBACKN */
275         0x8002051500000000ULL, 0x80020515000000E0ULL,
276         0x80020515F20000E4ULL, 0x8003051500000000ULL,
277         0x80030515000000E0ULL, 0x80030515F20000E4ULL,
278         0x8004051500000000ULL, 0x80040515000000E0ULL,
279         0x80040515F20000E4ULL, 0x8005051500000000ULL,
280         0x80050515000000E0ULL, 0x80050515F20000E4ULL,
281         END_SIGN
282 };
283
284 /*
285  * Constants for Fixing the MacAddress problem seen mostly on
286  * Alpha machines.
287  */
288 static const u64 fix_mac[] = {
289         0x0060000000000000ULL, 0x0060600000000000ULL,
290         0x0040600000000000ULL, 0x0000600000000000ULL,
291         0x0020600000000000ULL, 0x0060600000000000ULL,
292         0x0020600000000000ULL, 0x0060600000000000ULL,
293         0x0020600000000000ULL, 0x0060600000000000ULL,
294         0x0020600000000000ULL, 0x0060600000000000ULL,
295         0x0020600000000000ULL, 0x0060600000000000ULL,
296         0x0020600000000000ULL, 0x0060600000000000ULL,
297         0x0020600000000000ULL, 0x0060600000000000ULL,
298         0x0020600000000000ULL, 0x0060600000000000ULL,
299         0x0020600000000000ULL, 0x0060600000000000ULL,
300         0x0020600000000000ULL, 0x0060600000000000ULL,
301         0x0020600000000000ULL, 0x0000600000000000ULL,
302         0x0040600000000000ULL, 0x0060600000000000ULL,
303         END_SIGN
304 };
305
306 /* Module Loadable parameters. */
307 static unsigned int tx_fifo_num = 1;
308 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
309     {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
310 static unsigned int rx_ring_num = 1;
311 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
312     {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
313 static unsigned int rts_frm_len[MAX_RX_RINGS] =
314     {[0 ...(MAX_RX_RINGS - 1)] = 0 };
315 static unsigned int rx_ring_mode = 1;
316 static unsigned int use_continuous_tx_intrs = 1;
317 static unsigned int rmac_pause_time = 0x100;
318 static unsigned int mc_pause_threshold_q0q3 = 187;
319 static unsigned int mc_pause_threshold_q4q7 = 187;
320 static unsigned int shared_splits;
321 static unsigned int tmac_util_period = 5;
322 static unsigned int rmac_util_period = 5;
323 static unsigned int bimodal = 0;
324 static unsigned int l3l4hdr_size = 128;
325 #ifndef CONFIG_S2IO_NAPI
326 static unsigned int indicate_max_pkts;
327 #endif
328 /* Frequency of Rx desc syncs expressed as power of 2 */
329 static unsigned int rxsync_frequency = 3;
330 /* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */
331 static unsigned int intr_type = 0;
332 /* Large receive offload feature */
333 static unsigned int lro = 0;
334 /* Max pkts to be aggregated by LRO at one time. If not specified,
335  * aggregation happens until we hit max IP pkt size(64K)
336  */
337 static unsigned int lro_max_pkts = 0xFFFF;
338
339 /*
340  * S2IO device table.
341  * This table lists all the devices that this driver supports.
342  */
343 static struct pci_device_id s2io_tbl[] __devinitdata = {
344         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
345          PCI_ANY_ID, PCI_ANY_ID},
346         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
347          PCI_ANY_ID, PCI_ANY_ID},
348         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
349          PCI_ANY_ID, PCI_ANY_ID},
350         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
351          PCI_ANY_ID, PCI_ANY_ID},
352         {0,}
353 };
354
355 MODULE_DEVICE_TABLE(pci, s2io_tbl);
356
357 static struct pci_driver s2io_driver = {
358       .name = "S2IO",
359       .id_table = s2io_tbl,
360       .probe = s2io_init_nic,
361       .remove = __devexit_p(s2io_rem_nic),
362 };
363
364 /* A simplifier macro used both by init and free shared_mem Fns(). */
365 #define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
366
367 /**
368  * init_shared_mem - Allocation and Initialization of Memory
369  * @nic: Device private variable.
370  * Description: The function allocates all the memory areas shared
371  * between the NIC and the driver. This includes Tx descriptors,
372  * Rx descriptors and the statistics block.
373  */
374
375 static int init_shared_mem(struct s2io_nic *nic)
376 {
377         u32 size;
378         void *tmp_v_addr, *tmp_v_addr_next;
379         dma_addr_t tmp_p_addr, tmp_p_addr_next;
380         RxD_block_t *pre_rxd_blk = NULL;
381         int i, j, blk_cnt, rx_sz, tx_sz;
382         int lst_size, lst_per_page;
383         struct net_device *dev = nic->dev;
384         unsigned long tmp;
385         buffAdd_t *ba;
386
387         mac_info_t *mac_control;
388         struct config_param *config;
389
390         mac_control = &nic->mac_control;
391         config = &nic->config;
392
393
394         /* Allocation and initialization of TXDLs in FIOFs */
395         size = 0;
396         for (i = 0; i < config->tx_fifo_num; i++) {
397                 size += config->tx_cfg[i].fifo_len;
398         }
399         if (size > MAX_AVAILABLE_TXDS) {
400                 DBG_PRINT(ERR_DBG, "%s: Requested TxDs too high, ",
401                           __FUNCTION__);
402                 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
403                 return FAILURE;
404         }
405
406         lst_size = (sizeof(TxD_t) * config->max_txds);
407         tx_sz = lst_size * size;
408         lst_per_page = PAGE_SIZE / lst_size;
409
410         for (i = 0; i < config->tx_fifo_num; i++) {
411                 int fifo_len = config->tx_cfg[i].fifo_len;
412                 int list_holder_size = fifo_len * sizeof(list_info_hold_t);
413                 mac_control->fifos[i].list_info = kmalloc(list_holder_size,
414                                                           GFP_KERNEL);
415                 if (!mac_control->fifos[i].list_info) {
416                         DBG_PRINT(ERR_DBG,
417                                   "Malloc failed for list_info\n");
418                         return -ENOMEM;
419                 }
420                 memset(mac_control->fifos[i].list_info, 0, list_holder_size);
421         }
422         for (i = 0; i < config->tx_fifo_num; i++) {
423                 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
424                                                 lst_per_page);
425                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
426                 mac_control->fifos[i].tx_curr_put_info.fifo_len =
427                     config->tx_cfg[i].fifo_len - 1;
428                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
429                 mac_control->fifos[i].tx_curr_get_info.fifo_len =
430                     config->tx_cfg[i].fifo_len - 1;
431                 mac_control->fifos[i].fifo_no = i;
432                 mac_control->fifos[i].nic = nic;
433                 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
434
435                 for (j = 0; j < page_num; j++) {
436                         int k = 0;
437                         dma_addr_t tmp_p;
438                         void *tmp_v;
439                         tmp_v = pci_alloc_consistent(nic->pdev,
440                                                      PAGE_SIZE, &tmp_p);
441                         if (!tmp_v) {
442                                 DBG_PRINT(ERR_DBG,
443                                           "pci_alloc_consistent ");
444                                 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
445                                 return -ENOMEM;
446                         }
447                         /* If we got a zero DMA address(can happen on
448                          * certain platforms like PPC), reallocate.
449                          * Store virtual address of page we don't want,
450                          * to be freed later.
451                          */
452                         if (!tmp_p) {
453                                 mac_control->zerodma_virt_addr = tmp_v;
454                                 DBG_PRINT(INIT_DBG, 
455                                 "%s: Zero DMA address for TxDL. ", dev->name);
456                                 DBG_PRINT(INIT_DBG, 
457                                 "Virtual address %p\n", tmp_v);
458                                 tmp_v = pci_alloc_consistent(nic->pdev,
459                                                      PAGE_SIZE, &tmp_p);
460                                 if (!tmp_v) {
461                                         DBG_PRINT(ERR_DBG,
462                                           "pci_alloc_consistent ");
463                                         DBG_PRINT(ERR_DBG, "failed for TxDL\n");
464                                         return -ENOMEM;
465                                 }
466                         }
467                         while (k < lst_per_page) {
468                                 int l = (j * lst_per_page) + k;
469                                 if (l == config->tx_cfg[i].fifo_len)
470                                         break;
471                                 mac_control->fifos[i].list_info[l].list_virt_addr =
472                                     tmp_v + (k * lst_size);
473                                 mac_control->fifos[i].list_info[l].list_phy_addr =
474                                     tmp_p + (k * lst_size);
475                                 k++;
476                         }
477                 }
478         }
479
480         nic->ufo_in_band_v = kmalloc((sizeof(u64) * size), GFP_KERNEL);
481         if (!nic->ufo_in_band_v)
482                 return -ENOMEM;
483
484         /* Allocation and initialization of RXDs in Rings */
485         size = 0;
486         for (i = 0; i < config->rx_ring_num; i++) {
487                 if (config->rx_cfg[i].num_rxd %
488                     (rxd_count[nic->rxd_mode] + 1)) {
489                         DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
490                         DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
491                                   i);
492                         DBG_PRINT(ERR_DBG, "RxDs per Block");
493                         return FAILURE;
494                 }
495                 size += config->rx_cfg[i].num_rxd;
496                 mac_control->rings[i].block_count =
497                         config->rx_cfg[i].num_rxd /
498                         (rxd_count[nic->rxd_mode] + 1 );
499                 mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
500                         mac_control->rings[i].block_count;
501         }
502         if (nic->rxd_mode == RXD_MODE_1)
503                 size = (size * (sizeof(RxD1_t)));
504         else
505                 size = (size * (sizeof(RxD3_t)));
506         rx_sz = size;
507
508         for (i = 0; i < config->rx_ring_num; i++) {
509                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
510                 mac_control->rings[i].rx_curr_get_info.offset = 0;
511                 mac_control->rings[i].rx_curr_get_info.ring_len =
512                     config->rx_cfg[i].num_rxd - 1;
513                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
514                 mac_control->rings[i].rx_curr_put_info.offset = 0;
515                 mac_control->rings[i].rx_curr_put_info.ring_len =
516                     config->rx_cfg[i].num_rxd - 1;
517                 mac_control->rings[i].nic = nic;
518                 mac_control->rings[i].ring_no = i;
519
520                 blk_cnt = config->rx_cfg[i].num_rxd /
521                                 (rxd_count[nic->rxd_mode] + 1);
522                 /*  Allocating all the Rx blocks */
523                 for (j = 0; j < blk_cnt; j++) {
524                         rx_block_info_t *rx_blocks;
525                         int l;
526
527                         rx_blocks = &mac_control->rings[i].rx_blocks[j];
528                         size = SIZE_OF_BLOCK; //size is always page size
529                         tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
530                                                           &tmp_p_addr);
531                         if (tmp_v_addr == NULL) {
532                                 /*
533                                  * In case of failure, free_shared_mem()
534                                  * is called, which should free any
535                                  * memory that was alloced till the
536                                  * failure happened.
537                                  */
538                                 rx_blocks->block_virt_addr = tmp_v_addr;
539                                 return -ENOMEM;
540                         }
541                         memset(tmp_v_addr, 0, size);
542                         rx_blocks->block_virt_addr = tmp_v_addr;
543                         rx_blocks->block_dma_addr = tmp_p_addr;
544                         rx_blocks->rxds = kmalloc(sizeof(rxd_info_t)*
545                                                   rxd_count[nic->rxd_mode],
546                                                   GFP_KERNEL);
547                         for (l=0; l<rxd_count[nic->rxd_mode];l++) {
548                                 rx_blocks->rxds[l].virt_addr =
549                                         rx_blocks->block_virt_addr +
550                                         (rxd_size[nic->rxd_mode] * l);
551                                 rx_blocks->rxds[l].dma_addr =
552                                         rx_blocks->block_dma_addr +
553                                         (rxd_size[nic->rxd_mode] * l);
554                         }
555
556                         mac_control->rings[i].rx_blocks[j].block_virt_addr =
557                                 tmp_v_addr;
558                         mac_control->rings[i].rx_blocks[j].block_dma_addr =
559                                 tmp_p_addr;
560                 }
561                 /* Interlinking all Rx Blocks */
562                 for (j = 0; j < blk_cnt; j++) {
563                         tmp_v_addr =
564                                 mac_control->rings[i].rx_blocks[j].block_virt_addr;
565                         tmp_v_addr_next =
566                                 mac_control->rings[i].rx_blocks[(j + 1) %
567                                               blk_cnt].block_virt_addr;
568                         tmp_p_addr =
569                                 mac_control->rings[i].rx_blocks[j].block_dma_addr;
570                         tmp_p_addr_next =
571                                 mac_control->rings[i].rx_blocks[(j + 1) %
572                                               blk_cnt].block_dma_addr;
573
574                         pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
575                         pre_rxd_blk->reserved_2_pNext_RxD_block =
576                             (unsigned long) tmp_v_addr_next;
577                         pre_rxd_blk->pNext_RxD_Blk_physical =
578                             (u64) tmp_p_addr_next;
579                 }
580         }
581         if (nic->rxd_mode >= RXD_MODE_3A) {
582                 /*
583                  * Allocation of Storages for buffer addresses in 2BUFF mode
584                  * and the buffers as well.
585                  */
586                 for (i = 0; i < config->rx_ring_num; i++) {
587                         blk_cnt = config->rx_cfg[i].num_rxd /
588                            (rxd_count[nic->rxd_mode]+ 1);
589                         mac_control->rings[i].ba =
590                                 kmalloc((sizeof(buffAdd_t *) * blk_cnt),
591                                      GFP_KERNEL);
592                         if (!mac_control->rings[i].ba)
593                                 return -ENOMEM;
594                         for (j = 0; j < blk_cnt; j++) {
595                                 int k = 0;
596                                 mac_control->rings[i].ba[j] =
597                                         kmalloc((sizeof(buffAdd_t) *
598                                                 (rxd_count[nic->rxd_mode] + 1)),
599                                                 GFP_KERNEL);
600                                 if (!mac_control->rings[i].ba[j])
601                                         return -ENOMEM;
602                                 while (k != rxd_count[nic->rxd_mode]) {
603                                         ba = &mac_control->rings[i].ba[j][k];
604
605                                         ba->ba_0_org = (void *) kmalloc
606                                             (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
607                                         if (!ba->ba_0_org)
608                                                 return -ENOMEM;
609                                         tmp = (unsigned long)ba->ba_0_org;
610                                         tmp += ALIGN_SIZE;
611                                         tmp &= ~((unsigned long) ALIGN_SIZE);
612                                         ba->ba_0 = (void *) tmp;
613
614                                         ba->ba_1_org = (void *) kmalloc
615                                             (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
616                                         if (!ba->ba_1_org)
617                                                 return -ENOMEM;
618                                         tmp = (unsigned long) ba->ba_1_org;
619                                         tmp += ALIGN_SIZE;
620                                         tmp &= ~((unsigned long) ALIGN_SIZE);
621                                         ba->ba_1 = (void *) tmp;
622                                         k++;
623                                 }
624                         }
625                 }
626         }
627
628         /* Allocation and initialization of Statistics block */
629         size = sizeof(StatInfo_t);
630         mac_control->stats_mem = pci_alloc_consistent
631             (nic->pdev, size, &mac_control->stats_mem_phy);
632
633         if (!mac_control->stats_mem) {
634                 /*
635                  * In case of failure, free_shared_mem() is called, which
636                  * should free any memory that was alloced till the
637                  * failure happened.
638                  */
639                 return -ENOMEM;
640         }
641         mac_control->stats_mem_sz = size;
642
643         tmp_v_addr = mac_control->stats_mem;
644         mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
645         memset(tmp_v_addr, 0, size);
646         DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
647                   (unsigned long long) tmp_p_addr);
648
649         return SUCCESS;
650 }
651
652 /**
653  * free_shared_mem - Free the allocated Memory
654  * @nic:  Device private variable.
655  * Description: This function is to free all memory locations allocated by
656  * the init_shared_mem() function and return it to the kernel.
657  */
658
659 static void free_shared_mem(struct s2io_nic *nic)
660 {
661         int i, j, blk_cnt, size;
662         void *tmp_v_addr;
663         dma_addr_t tmp_p_addr;
664         mac_info_t *mac_control;
665         struct config_param *config;
666         int lst_size, lst_per_page;
667         struct net_device *dev = nic->dev;
668
669         if (!nic)
670                 return;
671
672         mac_control = &nic->mac_control;
673         config = &nic->config;
674
675         lst_size = (sizeof(TxD_t) * config->max_txds);
676         lst_per_page = PAGE_SIZE / lst_size;
677
678         for (i = 0; i < config->tx_fifo_num; i++) {
679                 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
680                                                 lst_per_page);
681                 for (j = 0; j < page_num; j++) {
682                         int mem_blks = (j * lst_per_page);
683                         if (!mac_control->fifos[i].list_info)
684                                 return; 
685                         if (!mac_control->fifos[i].list_info[mem_blks].
686                                  list_virt_addr)
687                                 break;
688                         pci_free_consistent(nic->pdev, PAGE_SIZE,
689                                             mac_control->fifos[i].
690                                             list_info[mem_blks].
691                                             list_virt_addr,
692                                             mac_control->fifos[i].
693                                             list_info[mem_blks].
694                                             list_phy_addr);
695                 }
696                 /* If we got a zero DMA address during allocation,
697                  * free the page now
698                  */
699                 if (mac_control->zerodma_virt_addr) {
700                         pci_free_consistent(nic->pdev, PAGE_SIZE,
701                                             mac_control->zerodma_virt_addr,
702                                             (dma_addr_t)0);
703                         DBG_PRINT(INIT_DBG, 
704                                 "%s: Freeing TxDL with zero DMA addr. ",
705                                 dev->name);
706                         DBG_PRINT(INIT_DBG, "Virtual address %p\n",
707                                 mac_control->zerodma_virt_addr);
708                 }
709                 kfree(mac_control->fifos[i].list_info);
710         }
711
712         size = SIZE_OF_BLOCK;
713         for (i = 0; i < config->rx_ring_num; i++) {
714                 blk_cnt = mac_control->rings[i].block_count;
715                 for (j = 0; j < blk_cnt; j++) {
716                         tmp_v_addr = mac_control->rings[i].rx_blocks[j].
717                                 block_virt_addr;
718                         tmp_p_addr = mac_control->rings[i].rx_blocks[j].
719                                 block_dma_addr;
720                         if (tmp_v_addr == NULL)
721                                 break;
722                         pci_free_consistent(nic->pdev, size,
723                                             tmp_v_addr, tmp_p_addr);
724                         kfree(mac_control->rings[i].rx_blocks[j].rxds);
725                 }
726         }
727
728         if (nic->rxd_mode >= RXD_MODE_3A) {
729                 /* Freeing buffer storage addresses in 2BUFF mode. */
730                 for (i = 0; i < config->rx_ring_num; i++) {
731                         blk_cnt = config->rx_cfg[i].num_rxd /
732                             (rxd_count[nic->rxd_mode] + 1);
733                         for (j = 0; j < blk_cnt; j++) {
734                                 int k = 0;
735                                 if (!mac_control->rings[i].ba[j])
736                                         continue;
737                                 while (k != rxd_count[nic->rxd_mode]) {
738                                         buffAdd_t *ba =
739                                                 &mac_control->rings[i].ba[j][k];
740                                         kfree(ba->ba_0_org);
741                                         kfree(ba->ba_1_org);
742                                         k++;
743                                 }
744                                 kfree(mac_control->rings[i].ba[j]);
745                         }
746                         kfree(mac_control->rings[i].ba);
747                 }
748         }
749
750         if (mac_control->stats_mem) {
751                 pci_free_consistent(nic->pdev,
752                                     mac_control->stats_mem_sz,
753                                     mac_control->stats_mem,
754                                     mac_control->stats_mem_phy);
755         }
756         if (nic->ufo_in_band_v)
757                 kfree(nic->ufo_in_band_v);
758 }
759
/**
 * s2io_verify_pci_mode - Check that the adapter reports a known PCI bus mode.
 */
763
764 static int s2io_verify_pci_mode(nic_t *nic)
765 {
766         XENA_dev_config_t __iomem *bar0 = nic->bar0;
767         register u64 val64 = 0;
768         int     mode;
769
770         val64 = readq(&bar0->pci_mode);
771         mode = (u8)GET_PCI_MODE(val64);
772
773         if ( val64 & PCI_MODE_UNKNOWN_MODE)
774                 return -1;      /* Unknown PCI mode */
775         return mode;
776 }
777
778
/**
 * s2io_print_pci_mode - Log the negotiated PCI/PCI-X bus width and speed.
 */
782 static int s2io_print_pci_mode(nic_t *nic)
783 {
784         XENA_dev_config_t __iomem *bar0 = nic->bar0;
785         register u64 val64 = 0;
786         int     mode;
787         struct config_param *config = &nic->config;
788
789         val64 = readq(&bar0->pci_mode);
790         mode = (u8)GET_PCI_MODE(val64);
791
792         if ( val64 & PCI_MODE_UNKNOWN_MODE)
793                 return -1;      /* Unknown PCI mode */
794
795         if (val64 & PCI_MODE_32_BITS) {
796                 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
797         } else {
798                 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
799         }
800
801         switch(mode) {
802                 case PCI_MODE_PCI_33:
803                         DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
804                         config->bus_speed = 33;
805                         break;
806                 case PCI_MODE_PCI_66:
807                         DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
808                         config->bus_speed = 133;
809                         break;
810                 case PCI_MODE_PCIX_M1_66:
811                         DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
812                         config->bus_speed = 133; /* Herc doubles the clock rate */
813                         break;
814                 case PCI_MODE_PCIX_M1_100:
815                         DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
816                         config->bus_speed = 200;
817                         break;
818                 case PCI_MODE_PCIX_M1_133:
819                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
820                         config->bus_speed = 266;
821                         break;
822                 case PCI_MODE_PCIX_M2_66:
823                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
824                         config->bus_speed = 133;
825                         break;
826                 case PCI_MODE_PCIX_M2_100:
827                         DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
828                         config->bus_speed = 200;
829                         break;
830                 case PCI_MODE_PCIX_M2_133:
831                         DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
832                         config->bus_speed = 266;
833                         break;
834                 default:
835                         return -1;      /* Unsupported bus speed */
836         }
837
838         return mode;
839 }
840
841 /**
842  *  init_nic - Initialization of hardware
 *  @nic: device private variable
844  *  Description: The function sequentially configures every block
845  *  of the H/W from their reset values.
846  *  Return Value:  SUCCESS on success and
847  *  '-1' on failure (endian settings incorrect).
848  */
849
850 static int init_nic(struct s2io_nic *nic)
851 {
852         XENA_dev_config_t __iomem *bar0 = nic->bar0;
853         struct net_device *dev = nic->dev;
854         register u64 val64 = 0;
855         void __iomem *add;
856         u32 time;
857         int i, j;
858         mac_info_t *mac_control;
859         struct config_param *config;
860         int mdio_cnt = 0, dtx_cnt = 0;
861         unsigned long long mem_share;
862         int mem_size;
863
864         mac_control = &nic->mac_control;
865         config = &nic->config;
866
        /* to set the swapper control on the card */
868         if(s2io_set_swapper(nic)) {
869                 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
870                 return -1;
871         }
872
873         /*
874          * Herc requires EOI to be removed from reset before XGXS, so..
875          */
876         if (nic->device_type & XFRAME_II_DEVICE) {
877                 val64 = 0xA500000000ULL;
878                 writeq(val64, &bar0->sw_reset);
879                 msleep(500);
880                 val64 = readq(&bar0->sw_reset);
881         }
882
883         /* Remove XGXS from reset state */
884         val64 = 0;
885         writeq(val64, &bar0->sw_reset);
886         msleep(500);
887         val64 = readq(&bar0->sw_reset);
888
889         /*  Enable Receiving broadcasts */
890         add = &bar0->mac_cfg;
891         val64 = readq(&bar0->mac_cfg);
892         val64 |= MAC_RMAC_BCAST_ENABLE;
893         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
894         writel((u32) val64, add);
895         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
896         writel((u32) (val64 >> 32), (add + 4));
897
898         /* Read registers in all blocks */
899         val64 = readq(&bar0->mac_int_mask);
900         val64 = readq(&bar0->mc_int_mask);
901         val64 = readq(&bar0->xgxs_int_mask);
902
903         /*  Set MTU */
904         val64 = dev->mtu;
905         writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
906
907         /*
908          * Configuring the XAUI Interface of Xena.
909          * ***************************************
910          * To Configure the Xena's XAUI, one has to write a series
911          * of 64 bit values into two registers in a particular
912          * sequence. Hence a macro 'SWITCH_SIGN' has been defined
913          * which will be defined in the array of configuration values
914          * (xena_dtx_cfg & xena_mdio_cfg) at appropriate places
         * to switch writing from one register to another. We continue
916          * writing these values until we encounter the 'END_SIGN' macro.
917          * For example, After making a series of 21 writes into
918          * dtx_control register the 'SWITCH_SIGN' appears and hence we
919          * start writing into mdio_control until we encounter END_SIGN.
920          */
921         if (nic->device_type & XFRAME_II_DEVICE) {
922                 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
923                         SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
924                                           &bar0->dtx_control, UF);
925                         if (dtx_cnt & 0x1)
926                                 msleep(1); /* Necessary!! */
927                         dtx_cnt++;
928                 }
929         } else {
930                 while (1) {
931                       dtx_cfg:
932                         while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
933                                 if (xena_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
934                                         dtx_cnt++;
935                                         goto mdio_cfg;
936                                 }
937                                 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
938                                                   &bar0->dtx_control, UF);
939                                 val64 = readq(&bar0->dtx_control);
940                                 dtx_cnt++;
941                         }
942                       mdio_cfg:
943                         while (xena_mdio_cfg[mdio_cnt] != END_SIGN) {
944                                 if (xena_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
945                                         mdio_cnt++;
946                                         goto dtx_cfg;
947                                 }
948                                 SPECIAL_REG_WRITE(xena_mdio_cfg[mdio_cnt],
949                                                   &bar0->mdio_control, UF);
950                                 val64 = readq(&bar0->mdio_control);
951                                 mdio_cnt++;
952                         }
953                         if ((xena_dtx_cfg[dtx_cnt] == END_SIGN) &&
954                             (xena_mdio_cfg[mdio_cnt] == END_SIGN)) {
955                                 break;
956                         } else {
957                                 goto dtx_cfg;
958                         }
959                 }
960         }
961
962         /*  Tx DMA Initialization */
963         val64 = 0;
964         writeq(val64, &bar0->tx_fifo_partition_0);
965         writeq(val64, &bar0->tx_fifo_partition_1);
966         writeq(val64, &bar0->tx_fifo_partition_2);
967         writeq(val64, &bar0->tx_fifo_partition_3);
968
969
970         for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
971                 val64 |=
972                     vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
973                          13) | vBIT(config->tx_cfg[i].fifo_priority,
974                                     ((i * 32) + 5), 3);
975
976                 if (i == (config->tx_fifo_num - 1)) {
977                         if (i % 2 == 0)
978                                 i++;
979                 }
980
981                 switch (i) {
982                 case 1:
983                         writeq(val64, &bar0->tx_fifo_partition_0);
984                         val64 = 0;
985                         break;
986                 case 3:
987                         writeq(val64, &bar0->tx_fifo_partition_1);
988                         val64 = 0;
989                         break;
990                 case 5:
991                         writeq(val64, &bar0->tx_fifo_partition_2);
992                         val64 = 0;
993                         break;
994                 case 7:
995                         writeq(val64, &bar0->tx_fifo_partition_3);
996                         break;
997                 }
998         }
999
1000         /* Enable Tx FIFO partition 0. */
1001         val64 = readq(&bar0->tx_fifo_partition_0);
1002         val64 |= BIT(0);        /* To enable the FIFO partition. */
1003         writeq(val64, &bar0->tx_fifo_partition_0);
1004
1005         /*
1006          * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1007          * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1008          */
1009         if ((nic->device_type == XFRAME_I_DEVICE) &&
1010                 (get_xena_rev_id(nic->pdev) < 4))
1011                 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1012
1013         val64 = readq(&bar0->tx_fifo_partition_0);
1014         DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1015                   &bar0->tx_fifo_partition_0, (unsigned long long) val64);
1016
1017         /*
1018          * Initialization of Tx_PA_CONFIG register to ignore packet
1019          * integrity checking.
1020          */
1021         val64 = readq(&bar0->tx_pa_cfg);
1022         val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
1023             TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
1024         writeq(val64, &bar0->tx_pa_cfg);
1025
1026         /* Rx DMA intialization. */
1027         val64 = 0;
1028         for (i = 0; i < config->rx_ring_num; i++) {
1029                 val64 |=
1030                     vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
1031                          3);
1032         }
1033         writeq(val64, &bar0->rx_queue_priority);
1034
1035         /*
1036          * Allocating equal share of memory to all the
1037          * configured Rings.
1038          */
1039         val64 = 0;
1040         if (nic->device_type & XFRAME_II_DEVICE)
1041                 mem_size = 32;
1042         else
1043                 mem_size = 64;
1044
1045         for (i = 0; i < config->rx_ring_num; i++) {
1046                 switch (i) {
1047                 case 0:
1048                         mem_share = (mem_size / config->rx_ring_num +
1049                                      mem_size % config->rx_ring_num);
1050                         val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1051                         continue;
1052                 case 1:
1053                         mem_share = (mem_size / config->rx_ring_num);
1054                         val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1055                         continue;
1056                 case 2:
1057                         mem_share = (mem_size / config->rx_ring_num);
1058                         val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1059                         continue;
1060                 case 3:
1061                         mem_share = (mem_size / config->rx_ring_num);
1062                         val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1063                         continue;
1064                 case 4:
1065                         mem_share = (mem_size / config->rx_ring_num);
1066                         val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1067                         continue;
1068                 case 5:
1069                         mem_share = (mem_size / config->rx_ring_num);
1070                         val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1071                         continue;
1072                 case 6:
1073                         mem_share = (mem_size / config->rx_ring_num);
1074                         val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1075                         continue;
1076                 case 7:
1077                         mem_share = (mem_size / config->rx_ring_num);
1078                         val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1079                         continue;
1080                 }
1081         }
1082         writeq(val64, &bar0->rx_queue_cfg);
1083
1084         /*
1085          * Filling Tx round robin registers
1086          * as per the number of FIFOs
1087          */
1088         switch (config->tx_fifo_num) {
1089         case 1:
1090                 val64 = 0x0000000000000000ULL;
1091                 writeq(val64, &bar0->tx_w_round_robin_0);
1092                 writeq(val64, &bar0->tx_w_round_robin_1);
1093                 writeq(val64, &bar0->tx_w_round_robin_2);
1094                 writeq(val64, &bar0->tx_w_round_robin_3);
1095                 writeq(val64, &bar0->tx_w_round_robin_4);
1096                 break;
1097         case 2:
1098                 val64 = 0x0000010000010000ULL;
1099                 writeq(val64, &bar0->tx_w_round_robin_0);
1100                 val64 = 0x0100000100000100ULL;
1101                 writeq(val64, &bar0->tx_w_round_robin_1);
1102                 val64 = 0x0001000001000001ULL;
1103                 writeq(val64, &bar0->tx_w_round_robin_2);
1104                 val64 = 0x0000010000010000ULL;
1105                 writeq(val64, &bar0->tx_w_round_robin_3);
1106                 val64 = 0x0100000000000000ULL;
1107                 writeq(val64, &bar0->tx_w_round_robin_4);
1108                 break;
1109         case 3:
1110                 val64 = 0x0001000102000001ULL;
1111                 writeq(val64, &bar0->tx_w_round_robin_0);
1112                 val64 = 0x0001020000010001ULL;
1113                 writeq(val64, &bar0->tx_w_round_robin_1);
1114                 val64 = 0x0200000100010200ULL;
1115                 writeq(val64, &bar0->tx_w_round_robin_2);
1116                 val64 = 0x0001000102000001ULL;
1117                 writeq(val64, &bar0->tx_w_round_robin_3);
1118                 val64 = 0x0001020000000000ULL;
1119                 writeq(val64, &bar0->tx_w_round_robin_4);
1120                 break;
1121         case 4:
1122                 val64 = 0x0001020300010200ULL;
1123                 writeq(val64, &bar0->tx_w_round_robin_0);
1124                 val64 = 0x0100000102030001ULL;
1125                 writeq(val64, &bar0->tx_w_round_robin_1);
1126                 val64 = 0x0200010000010203ULL;
1127                 writeq(val64, &bar0->tx_w_round_robin_2);
1128                 val64 = 0x0001020001000001ULL;
1129                 writeq(val64, &bar0->tx_w_round_robin_3);
1130                 val64 = 0x0203000100000000ULL;
1131                 writeq(val64, &bar0->tx_w_round_robin_4);
1132                 break;
1133         case 5:
1134                 val64 = 0x0001000203000102ULL;
1135                 writeq(val64, &bar0->tx_w_round_robin_0);
1136                 val64 = 0x0001020001030004ULL;
1137                 writeq(val64, &bar0->tx_w_round_robin_1);
1138                 val64 = 0x0001000203000102ULL;
1139                 writeq(val64, &bar0->tx_w_round_robin_2);
1140                 val64 = 0x0001020001030004ULL;
1141                 writeq(val64, &bar0->tx_w_round_robin_3);
1142                 val64 = 0x0001000000000000ULL;
1143                 writeq(val64, &bar0->tx_w_round_robin_4);
1144                 break;
1145         case 6:
1146                 val64 = 0x0001020304000102ULL;
1147                 writeq(val64, &bar0->tx_w_round_robin_0);
1148                 val64 = 0x0304050001020001ULL;
1149                 writeq(val64, &bar0->tx_w_round_robin_1);
1150                 val64 = 0x0203000100000102ULL;
1151                 writeq(val64, &bar0->tx_w_round_robin_2);
1152                 val64 = 0x0304000102030405ULL;
1153                 writeq(val64, &bar0->tx_w_round_robin_3);
1154                 val64 = 0x0001000200000000ULL;
1155                 writeq(val64, &bar0->tx_w_round_robin_4);
1156                 break;
1157         case 7:
1158                 val64 = 0x0001020001020300ULL;
1159                 writeq(val64, &bar0->tx_w_round_robin_0);
1160                 val64 = 0x0102030400010203ULL;
1161                 writeq(val64, &bar0->tx_w_round_robin_1);
1162                 val64 = 0x0405060001020001ULL;
1163                 writeq(val64, &bar0->tx_w_round_robin_2);
1164                 val64 = 0x0304050000010200ULL;
1165                 writeq(val64, &bar0->tx_w_round_robin_3);
1166                 val64 = 0x0102030000000000ULL;
1167                 writeq(val64, &bar0->tx_w_round_robin_4);
1168                 break;
1169         case 8:
1170                 val64 = 0x0001020300040105ULL;
1171                 writeq(val64, &bar0->tx_w_round_robin_0);
1172                 val64 = 0x0200030106000204ULL;
1173                 writeq(val64, &bar0->tx_w_round_robin_1);
1174                 val64 = 0x0103000502010007ULL;
1175                 writeq(val64, &bar0->tx_w_round_robin_2);
1176                 val64 = 0x0304010002060500ULL;
1177                 writeq(val64, &bar0->tx_w_round_robin_3);
1178                 val64 = 0x0103020400000000ULL;
1179                 writeq(val64, &bar0->tx_w_round_robin_4);
1180                 break;
1181         }
1182
1183         /* Filling the Rx round robin registers as per the
1184          * number of Rings and steering based on QoS.
1185          */
1186         switch (config->rx_ring_num) {
1187         case 1:
1188                 val64 = 0x8080808080808080ULL;
1189                 writeq(val64, &bar0->rts_qos_steering);
1190                 break;
1191         case 2:
1192                 val64 = 0x0000010000010000ULL;
1193                 writeq(val64, &bar0->rx_w_round_robin_0);
1194                 val64 = 0x0100000100000100ULL;
1195                 writeq(val64, &bar0->rx_w_round_robin_1);
1196                 val64 = 0x0001000001000001ULL;
1197                 writeq(val64, &bar0->rx_w_round_robin_2);
1198                 val64 = 0x0000010000010000ULL;
1199                 writeq(val64, &bar0->rx_w_round_robin_3);
1200                 val64 = 0x0100000000000000ULL;
1201                 writeq(val64, &bar0->rx_w_round_robin_4);
1202
1203                 val64 = 0x8080808040404040ULL;
1204                 writeq(val64, &bar0->rts_qos_steering);
1205                 break;
1206         case 3:
1207                 val64 = 0x0001000102000001ULL;
1208                 writeq(val64, &bar0->rx_w_round_robin_0);
1209                 val64 = 0x0001020000010001ULL;
1210                 writeq(val64, &bar0->rx_w_round_robin_1);
1211                 val64 = 0x0200000100010200ULL;
1212                 writeq(val64, &bar0->rx_w_round_robin_2);
1213                 val64 = 0x0001000102000001ULL;
1214                 writeq(val64, &bar0->rx_w_round_robin_3);
1215                 val64 = 0x0001020000000000ULL;
1216                 writeq(val64, &bar0->rx_w_round_robin_4);
1217
1218                 val64 = 0x8080804040402020ULL;
1219                 writeq(val64, &bar0->rts_qos_steering);
1220                 break;
1221         case 4:
1222                 val64 = 0x0001020300010200ULL;
1223                 writeq(val64, &bar0->rx_w_round_robin_0);
1224                 val64 = 0x0100000102030001ULL;
1225                 writeq(val64, &bar0->rx_w_round_robin_1);
1226                 val64 = 0x0200010000010203ULL;
1227                 writeq(val64, &bar0->rx_w_round_robin_2);
1228                 val64 = 0x0001020001000001ULL;  
1229                 writeq(val64, &bar0->rx_w_round_robin_3);
1230                 val64 = 0x0203000100000000ULL;
1231                 writeq(val64, &bar0->rx_w_round_robin_4);
1232
1233                 val64 = 0x8080404020201010ULL;
1234                 writeq(val64, &bar0->rts_qos_steering);
1235                 break;
1236         case 5:
1237                 val64 = 0x0001000203000102ULL;
1238                 writeq(val64, &bar0->rx_w_round_robin_0);
1239                 val64 = 0x0001020001030004ULL;
1240                 writeq(val64, &bar0->rx_w_round_robin_1);
1241                 val64 = 0x0001000203000102ULL;
1242                 writeq(val64, &bar0->rx_w_round_robin_2);
1243                 val64 = 0x0001020001030004ULL;
1244                 writeq(val64, &bar0->rx_w_round_robin_3);
1245                 val64 = 0x0001000000000000ULL;
1246                 writeq(val64, &bar0->rx_w_round_robin_4);
1247
1248                 val64 = 0x8080404020201008ULL;
1249                 writeq(val64, &bar0->rts_qos_steering);
1250                 break;
1251         case 6:
1252                 val64 = 0x0001020304000102ULL;
1253                 writeq(val64, &bar0->rx_w_round_robin_0);
1254                 val64 = 0x0304050001020001ULL;
1255                 writeq(val64, &bar0->rx_w_round_robin_1);
1256                 val64 = 0x0203000100000102ULL;
1257                 writeq(val64, &bar0->rx_w_round_robin_2);
1258                 val64 = 0x0304000102030405ULL;
1259                 writeq(val64, &bar0->rx_w_round_robin_3);
1260                 val64 = 0x0001000200000000ULL;
1261                 writeq(val64, &bar0->rx_w_round_robin_4);
1262
1263                 val64 = 0x8080404020100804ULL;
1264                 writeq(val64, &bar0->rts_qos_steering);
1265                 break;
1266         case 7:
1267                 val64 = 0x0001020001020300ULL;
1268                 writeq(val64, &bar0->rx_w_round_robin_0);
1269                 val64 = 0x0102030400010203ULL;
1270                 writeq(val64, &bar0->rx_w_round_robin_1);
1271                 val64 = 0x0405060001020001ULL;
1272                 writeq(val64, &bar0->rx_w_round_robin_2);
1273                 val64 = 0x0304050000010200ULL;
1274                 writeq(val64, &bar0->rx_w_round_robin_3);
1275                 val64 = 0x0102030000000000ULL;
1276                 writeq(val64, &bar0->rx_w_round_robin_4);
1277
1278                 val64 = 0x8080402010080402ULL;
1279                 writeq(val64, &bar0->rts_qos_steering);
1280                 break;
1281         case 8:
1282                 val64 = 0x0001020300040105ULL;
1283                 writeq(val64, &bar0->rx_w_round_robin_0);
1284                 val64 = 0x0200030106000204ULL;
1285                 writeq(val64, &bar0->rx_w_round_robin_1);
1286                 val64 = 0x0103000502010007ULL;
1287                 writeq(val64, &bar0->rx_w_round_robin_2);
1288                 val64 = 0x0304010002060500ULL;
1289                 writeq(val64, &bar0->rx_w_round_robin_3);
1290                 val64 = 0x0103020400000000ULL;
1291                 writeq(val64, &bar0->rx_w_round_robin_4);
1292
1293                 val64 = 0x8040201008040201ULL;
1294                 writeq(val64, &bar0->rts_qos_steering);
1295                 break;
1296         }
1297
1298         /* UDP Fix */
1299         val64 = 0;
1300         for (i = 0; i < 8; i++)
1301                 writeq(val64, &bar0->rts_frm_len_n[i]);
1302
1303         /* Set the default rts frame length for the rings configured */
1304         val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1305         for (i = 0 ; i < config->rx_ring_num ; i++)
1306                 writeq(val64, &bar0->rts_frm_len_n[i]);
1307
1308         /* Set the frame length for the configured rings
1309          * desired by the user
1310          */
1311         for (i = 0; i < config->rx_ring_num; i++) {
1312                 /* If rts_frm_len[i] == 0 then it is assumed that user not
1313                  * specified frame length steering.
1314                  * If the user provides the frame length then program
1315                  * the rts_frm_len register for those values or else
1316                  * leave it as it is.
1317                  */
1318                 if (rts_frm_len[i] != 0) {
1319                         writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1320                                 &bar0->rts_frm_len_n[i]);
1321                 }
1322         }
1323
1324         /* Program statistics memory */
1325         writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1326
1327         if (nic->device_type == XFRAME_II_DEVICE) {
1328                 val64 = STAT_BC(0x320);
1329                 writeq(val64, &bar0->stat_byte_cnt);
1330         }
1331
1332         /*
1333          * Initializing the sampling rate for the device to calculate the
1334          * bandwidth utilization.
1335          */
1336         val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1337             MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1338         writeq(val64, &bar0->mac_link_util);
1339
1340
1341         /*
1342          * Initializing the Transmit and Receive Traffic Interrupt
1343          * Scheme.
1344          */
1345         /*
1346          * TTI Initialization. Default Tx timer gets us about
1347          * 250 interrupts per sec. Continuous interrupts are enabled
1348          * by default.
1349          */
1350         if (nic->device_type == XFRAME_II_DEVICE) {
1351                 int count = (nic->config.bus_speed * 125)/2;
1352                 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1353         } else {
1354
1355                 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1356         }
1357         val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1358             TTI_DATA1_MEM_TX_URNG_B(0x10) |
1359             TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1360                 if (use_continuous_tx_intrs)
1361                         val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1362         writeq(val64, &bar0->tti_data1_mem);
1363
1364         val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1365             TTI_DATA2_MEM_TX_UFC_B(0x20) |
1366             TTI_DATA2_MEM_TX_UFC_C(0x70) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1367         writeq(val64, &bar0->tti_data2_mem);
1368
1369         val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1370         writeq(val64, &bar0->tti_command_mem);
1371
1372         /*
1373          * Once the operation completes, the Strobe bit of the command
1374          * register will be reset. We poll for this particular condition
1375          * We wait for a maximum of 500ms for the operation to complete,
1376          * if it's not complete by then we return error.
1377          */
1378         time = 0;
1379         while (TRUE) {
1380                 val64 = readq(&bar0->tti_command_mem);
1381                 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1382                         break;
1383                 }
1384                 if (time > 10) {
1385                         DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1386                                   dev->name);
1387                         return -1;
1388                 }
1389                 msleep(50);
1390                 time++;
1391         }
1392
1393         if (nic->config.bimodal) {
1394                 int k = 0;
1395                 for (k = 0; k < config->rx_ring_num; k++) {
1396                         val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1397                         val64 |= TTI_CMD_MEM_OFFSET(0x38+k);
1398                         writeq(val64, &bar0->tti_command_mem);
1399
1400                 /*
1401                  * Once the operation completes, the Strobe bit of the command
1402                  * register will be reset. We poll for this particular condition
1403                  * We wait for a maximum of 500ms for the operation to complete,
1404                  * if it's not complete by then we return error.
1405                 */
1406                         time = 0;
1407                         while (TRUE) {
1408                                 val64 = readq(&bar0->tti_command_mem);
1409                                 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1410                                         break;
1411                                 }
1412                                 if (time > 10) {
1413                                         DBG_PRINT(ERR_DBG,
1414                                                 "%s: TTI init Failed\n",
1415                                         dev->name);
1416                                         return -1;
1417                                 }
1418                                 time++;
1419                                 msleep(50);
1420                         }
1421                 }
1422         } else {
1423
1424                 /* RTI Initialization */
1425                 if (nic->device_type == XFRAME_II_DEVICE) {
1426                         /*
1427                          * Programmed to generate Apprx 500 Intrs per
1428                          * second
1429                          */
1430                         int count = (nic->config.bus_speed * 125)/4;
1431                         val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1432                 } else {
1433                         val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1434                 }
1435                 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1436                     RTI_DATA1_MEM_RX_URNG_B(0x10) |
1437                     RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1438
1439                 writeq(val64, &bar0->rti_data1_mem);
1440
1441                 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1442                     RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1443                 if (nic->intr_type == MSI_X)
1444                     val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1445                                 RTI_DATA2_MEM_RX_UFC_D(0x40));
1446                 else
1447                     val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1448                                 RTI_DATA2_MEM_RX_UFC_D(0x80));
1449                 writeq(val64, &bar0->rti_data2_mem);
1450
1451                 for (i = 0; i < config->rx_ring_num; i++) {
1452                         val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1453                                         | RTI_CMD_MEM_OFFSET(i);
1454                         writeq(val64, &bar0->rti_command_mem);
1455
1456                         /*
1457                          * Once the operation completes, the Strobe bit of the
1458                          * command register will be reset. We poll for this
1459                          * particular condition. We wait for a maximum of 500ms
1460                          * for the operation to complete, if it's not complete
1461                          * by then we return error.
1462                          */
1463                         time = 0;
1464                         while (TRUE) {
1465                                 val64 = readq(&bar0->rti_command_mem);
1466                                 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1467                                         break;
1468                                 }
1469                                 if (time > 10) {
1470                                         DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1471                                                   dev->name);
1472                                         return -1;
1473                                 }
1474                                 time++;
1475                                 msleep(50);
1476                         }
1477                 }
1478         }
1479
1480         /*
1481          * Initializing proper values as Pause threshold into all
1482          * the 8 Queues on Rx side.
1483          */
1484         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1485         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1486
1487         /* Disable RMAC PAD STRIPPING */
1488         add = &bar0->mac_cfg;
1489         val64 = readq(&bar0->mac_cfg);
1490         val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1491         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1492         writel((u32) (val64), add);
1493         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1494         writel((u32) (val64 >> 32), (add + 4));
1495         val64 = readq(&bar0->mac_cfg);
1496
1497         /* Enable FCS stripping by adapter */
1498         add = &bar0->mac_cfg;
1499         val64 = readq(&bar0->mac_cfg);
1500         val64 |= MAC_CFG_RMAC_STRIP_FCS;
1501         if (nic->device_type == XFRAME_II_DEVICE)
1502                 writeq(val64, &bar0->mac_cfg);
1503         else {
1504                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1505                 writel((u32) (val64), add);
1506                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1507                 writel((u32) (val64 >> 32), (add + 4));
1508         }
1509
1510         /*
1511          * Set the time value to be inserted in the pause frame
1512          * generated by xena.
1513          */
1514         val64 = readq(&bar0->rmac_pause_cfg);
1515         val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1516         val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1517         writeq(val64, &bar0->rmac_pause_cfg);
1518
1519         /*
1520          * Set the Threshold Limit for Generating the pause frame
1521          * If the amount of data in any Queue exceeds ratio of
1522          * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1523          * pause frame is generated
1524          */
1525         val64 = 0;
1526         for (i = 0; i < 4; i++) {
1527                 val64 |=
1528                     (((u64) 0xFF00 | nic->mac_control.
1529                       mc_pause_threshold_q0q3)
1530                      << (i * 2 * 8));
1531         }
1532         writeq(val64, &bar0->mc_pause_thresh_q0q3);
1533
1534         val64 = 0;
1535         for (i = 0; i < 4; i++) {
1536                 val64 |=
1537                     (((u64) 0xFF00 | nic->mac_control.
1538                       mc_pause_threshold_q4q7)
1539                      << (i * 2 * 8));
1540         }
1541         writeq(val64, &bar0->mc_pause_thresh_q4q7);
1542
1543         /*
1544          * TxDMA will stop Read request if the number of read split has
1545          * exceeded the limit pointed by shared_splits
1546          */
1547         val64 = readq(&bar0->pic_control);
1548         val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1549         writeq(val64, &bar0->pic_control);
1550
1551         if (nic->config.bus_speed == 266) {
1552                 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1553                 writeq(0x0, &bar0->read_retry_delay);
1554                 writeq(0x0, &bar0->write_retry_delay);
1555         }
1556
1557         /*
1558          * Programming the Herc to split every write transaction
1559          * that does not start on an ADB to reduce disconnects.
1560          */
1561         if (nic->device_type == XFRAME_II_DEVICE) {
1562                 val64 = EXT_REQ_EN | MISC_LINK_STABILITY_PRD(3);
1563                 writeq(val64, &bar0->misc_control);
1564                 val64 = readq(&bar0->pic_control2);
1565                 val64 &= ~(BIT(13)|BIT(14)|BIT(15));
1566                 writeq(val64, &bar0->pic_control2);
1567         }
1568
1569         /* Setting Link stability period to 64 ms */ 
1570         if (nic->device_type == XFRAME_II_DEVICE) {
1571                 val64 = MISC_LINK_STABILITY_PRD(3);
1572                 writeq(val64, &bar0->misc_control);
1573         }
1574
1575         return SUCCESS;
1576 }
1577 #define LINK_UP_DOWN_INTERRUPT          1
1578 #define MAC_RMAC_ERR_TIMER              2
1579
1580 static int s2io_link_fault_indication(nic_t *nic)
1581 {
1582         if (nic->intr_type != INTA)
1583                 return MAC_RMAC_ERR_TIMER;
1584         if (nic->device_type == XFRAME_II_DEVICE)
1585                 return LINK_UP_DOWN_INTERRUPT;
1586         else
1587                 return MAC_RMAC_ERR_TIMER;
1588 }
1589
1590 /**
1591  *  en_dis_able_nic_intrs - Enable or Disable the interrupts
1592  *  @nic: device private variable,
1593  *  @mask: A mask indicating which Intr block must be modified and,
1594  *  @flag: A flag indicating whether to enable or disable the Intrs.
1595  *  Description: This function will either disable or enable the interrupts
1596  *  depending on the flag argument. The mask argument can be used to
1597  *  enable/disable any Intr block.
1598  *  Return Value: NONE.
1599  */
1600
static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
{
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	register u64 val64 = 0, temp64 = 0;

	/*
	 * In every mask register touched below a SET bit masks (disables)
	 * that interrupt source and a CLEAR bit enables it: the enable
	 * paths read-modify-write with "&= ~bit", the disable paths write
	 * DISABLE_ALL_INTRS (all ones) and OR the block's bits back into
	 * general_int_mask.
	 */
	/*  Top level interrupt classification */
	/*  PIC Interrupts */
	if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
		/*  Enable PIC Intrs in the general intr mask register */
		val64 = TXPIC_INT_M | PIC_RX_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * If Hercules adapter enable GPIO otherwise
			 * disabled all PCIX, Flash, MDIO, IIC and GPIO
			 * interrupts for now.
			 * TODO
			 */
			if (s2io_link_fault_indication(nic) ==
					LINK_UP_DOWN_INTERRUPT ) {
				/* Unmask only the GPIO (link up/down)
				 * source within the PIC block. */
				temp64 = readq(&bar0->pic_int_mask);
				temp64 &= ~((u64) PIC_INT_GPIO);
				writeq(temp64, &bar0->pic_int_mask);
				temp64 = readq(&bar0->gpio_int_mask);
				temp64 &= ~((u64) GPIO_INT_MASK_LINK_UP);
				writeq(temp64, &bar0->gpio_int_mask);
			} else {
				writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
			}
			/*
			 * No MSI Support is available presently, so TTI and
			 * RTI interrupts are also disabled.
			 */
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable PIC Intrs in the general
			 * intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/*  DMA Interrupts */
	/*  Enabling/Disabling Tx DMA interrupts */
	if (mask & TX_DMA_INTR) {
		/* Enable TxDMA Intrs in the general intr mask register */
		val64 = TXDMA_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * Keep all interrupts other than PFC interrupt
			 * and PCC interrupt disabled in DMA level.
			 */
			val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
						      TXDMA_PCC_INT_M);
			writeq(val64, &bar0->txdma_int_mask);
			/*
			 * Enable only the MISC error 1 interrupt in PFC block
			 */
			val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
			writeq(val64, &bar0->pfc_err_mask);
			/*
			 * Enable only the FB_ECC error interrupt in PCC block
			 */
			val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
			writeq(val64, &bar0->pcc_err_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable TxDMA Intrs in the general intr mask
			 * register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
			writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/*  Enabling/Disabling Rx DMA interrupts */
	if (mask & RX_DMA_INTR) {
		/*  Enable RxDMA Intrs in the general intr mask register */
		val64 = RXDMA_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * All RxDMA block interrupts are disabled for now
			 * TODO
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable RxDMA Intrs in the general intr mask
			 * register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/*  MAC Interrupts */
	/*  Enabling/Disabling MAC interrupts */
	if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
		val64 = TXMAC_INT_M | RXMAC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * All MAC block error interrupts are disabled for now
			 * TODO
			 */
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable MAC Intrs in the general intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
			writeq(DISABLE_ALL_INTRS,
			       &bar0->mac_rmac_err_mask);

			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/*  XGXS Interrupts */
	if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
		val64 = TXXGXS_INT_M | RXXGXS_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * All XGXS block error interrupts are disabled for now
			 * TODO
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable MC Intrs in the general intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/*  Memory Controller(MC) interrupts */
	if (mask & MC_INTR) {
		val64 = MC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * Enable all MC Intrs.
			 */
			writeq(0x0, &bar0->mc_int_mask);
			writeq(0x0, &bar0->mc_err_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable MC Intrs in the general intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}


	/*  Tx traffic interrupts */
	if (mask & TX_TRAFFIC_INTR) {
		val64 = TXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * Enable all the Tx side interrupts
			 * writing 0 Enables all 64 TX interrupt levels
			 */
			writeq(0x0, &bar0->tx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Tx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/*  Rx traffic interrupts */
	if (mask & RX_TRAFFIC_INTR) {
		val64 = RXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/* writing 0 Enables all 8 RX interrupt levels */
			writeq(0x0, &bar0->rx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Rx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}
}
1830
1831 static int check_prc_pcc_state(u64 val64, int flag, int rev_id, int herc)
1832 {
1833         int ret = 0;
1834
1835         if (flag == FALSE) {
1836                 if ((!herc && (rev_id >= 4)) || herc) {
1837                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1838                             ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1839                              ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1840                                 ret = 1;
1841                         }
1842                 }else {
1843                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1844                             ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1845                              ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1846                                 ret = 1;
1847                         }
1848                 }
1849         } else {
1850                 if ((!herc && (rev_id >= 4)) || herc) {
1851                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1852                              ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1853                             (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1854                              ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1855                               ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1856                                 ret = 1;
1857                         }
1858                 } else {
1859                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1860                              ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1861                             (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1862                              ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1863                               ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1864                                 ret = 1;
1865                         }
1866                 }
1867         }
1868
1869         return ret;
1870 }
1871 /**
1872  *  verify_xena_quiescence - Checks whether the H/W is ready
1873  *  @val64 :  Value read from adapter status register.
1874  *  @flag : indicates if the adapter enable bit was ever written once
1875  *  before.
1876  *  Description: Returns whether the H/W is ready to go or not. Depending
1877  *  on whether adapter enable bit was written or not the comparison
1878  *  differs and the calling function passes the input argument flag to
1879  *  indicate this.
 *  Return: 1 if the Xena is quiescent
 *          0 if the Xena is not quiescent
1882  */
1883
1884 static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag)
1885 {
1886         int ret = 0, herc;
1887         u64 tmp64 = ~((u64) val64);
1888         int rev_id = get_xena_rev_id(sp->pdev);
1889
1890         herc = (sp->device_type == XFRAME_II_DEVICE);
1891         if (!
1892             (tmp64 &
1893              (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
1894               ADAPTER_STATUS_PFC_READY | ADAPTER_STATUS_TMAC_BUF_EMPTY |
1895               ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
1896               ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
1897               ADAPTER_STATUS_P_PLL_LOCK))) {
1898                 ret = check_prc_pcc_state(val64, flag, rev_id, herc);
1899         }
1900
1901         return ret;
1902 }
1903
1904 /**
1905  * fix_mac_address -  Fix for Mac addr problem on Alpha platforms
 * @sp: Pointer to device specific structure
1907  * Description :
1908  * New procedure to clear mac address reading  problems on Alpha platforms
1909  *
1910  */
1911
1912 static void fix_mac_address(nic_t * sp)
1913 {
1914         XENA_dev_config_t __iomem *bar0 = sp->bar0;
1915         u64 val64;
1916         int i = 0;
1917
1918         while (fix_mac[i] != END_SIGN) {
1919                 writeq(fix_mac[i++], &bar0->gpio_control);
1920                 udelay(10);
1921                 val64 = readq(&bar0->gpio_control);
1922         }
1923 }
1924
1925 /**
1926  *  start_nic - Turns the device on
1927  *  @nic : device private variable.
1928  *  Description:
1929  *  This function actually turns the device on. Before this  function is
1930  *  called,all Registers are configured from their reset states
1931  *  and shared memory is allocated but the NIC is still quiescent. On
1932  *  calling this function, the device interrupts are cleared and the NIC is
1933  *  literally switched on by writing into the adapter control register.
1934  *  Return Value:
1935  *  SUCCESS on success and -1 on failure.
1936  */
1937
static int start_nic(struct s2io_nic *nic)
{
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	u16 interruptible;
	u16 subid, i;
	mac_info_t *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/*  PRC Initialization and configuration */
	/* Point each PRC at the first Rx block of its ring and program
	 * its ring mode / backoff interval before enabling it. */
	for (i = 0; i < config->rx_ring_num; i++) {
		writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
		       &bar0->prc_rxd0_n[i]);

		val64 = readq(&bar0->prc_ctrl_n[i]);
		if (nic->config.bimodal)
			val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
		if (nic->rxd_mode == RXD_MODE_1)
			val64 |= PRC_CTRL_RC_ENABLED;
		else
			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
		if (nic->device_type == XFRAME_II_DEVICE)
			val64 |= PRC_CTRL_GROUP_READS;
		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
		writeq(val64, &bar0->prc_ctrl_n[i]);
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
		writeq(val64, &bar0->rx_pa_cfg);
	}

	/*
	 * Enabling MC-RLDRAM. After enabling the device, we timeout
	 * for around 100ms, which is approximately the time required
	 * for the device to be ready for operation.
	 */
	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
	val64 = readq(&bar0->mc_rldram_mrs);

	msleep(100);	/* Delay by around 100 ms. */

	/*
	 * ECC protection configuration.
	 * NOTE(review): the original comment said "Enabling ECC
	 * Protection" but the code CLEARS ADAPTER_ECC_EN — confirm the
	 * intended bit polarity against the Xframe register spec.
	 */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/*
	 * Clearing any possible Link state change interrupts that
	 * could have popped up just before Enabling the card.
	 */
	val64 = readq(&bar0->mac_rmac_err_reg);
	if (val64)
		writeq(val64, &bar0->mac_rmac_err_reg);

	/*
	 * Verify if the device is ready to be enabled, if so enable
	 * it.
	 */
	val64 = readq(&bar0->adapter_status);
	if (!verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
		DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
		DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
			  (unsigned long long) val64);
		return FAILURE;
	}

	/*  Enable select interrupts */
	/* For MSI/MSI-X everything is kept masked here; only INTA gets
	 * the traffic/PIC/MAC interrupt groups enabled. */
	if (nic->intr_type != INTA)
		en_dis_able_nic_intrs(nic, ENA_ALL_INTRS, DISABLE_INTRS);
	else {
		interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
		interruptible |= TX_PIC_INTR | RX_PIC_INTR;
		interruptible |= TX_MAC_INTR | RX_MAC_INTR;
		en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
	}

	/*
	 * With some switches, link might be already up at this point.
	 * Because of this weird behavior, when we enable laser,
	 * we may not get link. We need to handle this. We cannot
	 * figure out which switch is misbehaving. So we are forced to
	 * make a global change.
	 */

	/* Enabling Laser. */
	val64 = readq(&bar0->adapter_control);
	val64 |= ADAPTER_EOI_TX_ON;
	writeq(val64, &bar0->adapter_control);

	/* SXE-002: Initialize link and activity LED */
	subid = nic->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (nic->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		/* Magic LED programming value written past the mapped
		 * register struct at BAR0 offset 0x2700 (SXE-002). */
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	/*
	 * Don't see link state interrupts on certain switches, so
	 * directly scheduling a link state task from here.
	 */
	schedule_work(&nic->set_link_task);

	return SUCCESS;
}
2056 /**
2057  * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2058  */
2059 static struct sk_buff *s2io_txdl_getskb(fifo_info_t *fifo_data, TxD_t *txdlp, int get_off)
2060 {
2061         nic_t *nic = fifo_data->nic;
2062         struct sk_buff *skb;
2063         TxD_t *txds;
2064         u16 j, frg_cnt;
2065
2066         txds = txdlp;
2067         if (txds->Host_Control == (u64)(long)nic->ufo_in_band_v) {
2068                 pci_unmap_single(nic->pdev, (dma_addr_t)
2069                         txds->Buffer_Pointer, sizeof(u64),
2070                         PCI_DMA_TODEVICE);
2071                 txds++;
2072         }
2073
2074         skb = (struct sk_buff *) ((unsigned long)
2075                         txds->Host_Control);
2076         if (!skb) {
2077                 memset(txdlp, 0, (sizeof(TxD_t) * fifo_data->max_txds));
2078                 return NULL;
2079         }
2080         pci_unmap_single(nic->pdev, (dma_addr_t)
2081                          txds->Buffer_Pointer,
2082                          skb->len - skb->data_len,
2083                          PCI_DMA_TODEVICE);
2084         frg_cnt = skb_shinfo(skb)->nr_frags;
2085         if (frg_cnt) {
2086                 txds++;
2087                 for (j = 0; j < frg_cnt; j++, txds++) {
2088                         skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2089                         if (!txds->Buffer_Pointer)
2090                                 break;
2091                         pci_unmap_page(nic->pdev, (dma_addr_t) 
2092                                         txds->Buffer_Pointer,
2093                                        frag->size, PCI_DMA_TODEVICE);
2094                 }
2095         }
2096         txdlp->Host_Control = 0;
2097         return(skb);
2098 }
2099
2100 /**
2101  *  free_tx_buffers - Free all queued Tx buffers
2102  *  @nic : device private variable.
2103  *  Description:
2104  *  Free all queued Tx buffers.
2105  *  Return Value: void
2106 */
2107
2108 static void free_tx_buffers(struct s2io_nic *nic)
2109 {
2110         struct net_device *dev = nic->dev;
2111         struct sk_buff *skb;
2112         TxD_t *txdp;
2113         int i, j;
2114         mac_info_t *mac_control;
2115         struct config_param *config;
2116         int cnt = 0;
2117
2118         mac_control = &nic->mac_control;
2119         config = &nic->config;
2120
2121         for (i = 0; i < config->tx_fifo_num; i++) {
2122                 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
2123                         txdp = (TxD_t *) mac_control->fifos[i].list_info[j].
2124                             list_virt_addr;
2125                         skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2126                         if (skb) {
2127                                 dev_kfree_skb(skb);
2128                                 cnt++;
2129                         }
2130                 }
2131                 DBG_PRINT(INTR_DBG,
2132                           "%s:forcibly freeing %d skbs on FIFO%d\n",
2133                           dev->name, cnt, i);
2134                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2135                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2136         }
2137 }
2138
2139 /**
2140  *   stop_nic -  To stop the nic
 *   @nic : device private variable.
2142  *   Description:
2143  *   This function does exactly the opposite of what the start_nic()
2144  *   function does. This function is called to stop the device.
2145  *   Return Value:
2146  *   void.
2147  */
2148
2149 static void stop_nic(struct s2io_nic *nic)
2150 {
2151         XENA_dev_config_t __iomem *bar0 = nic->bar0;
2152         register u64 val64 = 0;
2153         u16 interruptible, i;
2154         mac_info_t *mac_control;
2155         struct config_param *config;
2156
2157         mac_control = &nic->mac_control;
2158         config = &nic->config;
2159
2160         /*  Disable all interrupts */
2161         interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2162         interruptible |= TX_PIC_INTR | RX_PIC_INTR;
2163         interruptible |= TX_MAC_INTR | RX_MAC_INTR;
2164         en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2165
2166         /*  Disable PRCs */
2167         for (i = 0; i < config->rx_ring_num; i++) {
2168                 val64 = readq(&bar0->prc_ctrl_n[i]);
2169                 val64 &= ~((u64) PRC_CTRL_RC_ENABLED);
2170                 writeq(val64, &bar0->prc_ctrl_n[i]);
2171         }
2172 }
2173
2174 static int fill_rxd_3buf(nic_t *nic, RxD_t *rxdp, struct sk_buff *skb)
2175 {
2176         struct net_device *dev = nic->dev;
2177         struct sk_buff *frag_list;
2178         void *tmp;
2179
2180         /* Buffer-1 receives L3/L4 headers */
2181         ((RxD3_t*)rxdp)->Buffer1_ptr = pci_map_single
2182                         (nic->pdev, skb->data, l3l4hdr_size + 4,
2183                         PCI_DMA_FROMDEVICE);
2184
2185         /* skb_shinfo(skb)->frag_list will have L4 data payload */
2186         skb_shinfo(skb)->frag_list = dev_alloc_skb(dev->mtu + ALIGN_SIZE);
2187         if (skb_shinfo(skb)->frag_list == NULL) {
2188                 DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n ", dev->name);
2189                 return -ENOMEM ;
2190         }
2191         frag_list = skb_shinfo(skb)->frag_list;
2192         frag_list->next = NULL;
2193         tmp = (void *)ALIGN((long)frag_list->data, ALIGN_SIZE + 1);
2194         frag_list->data = tmp;
2195         frag_list->tail = tmp;
2196
2197         /* Buffer-2 receives L4 data payload */
2198         ((RxD3_t*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev,
2199                                 frag_list->data, dev->mtu,
2200                                 PCI_DMA_FROMDEVICE);
2201         rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
2202         rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);
2203
2204         return SUCCESS;
2205 }
2206
2207 /**
2208  *  fill_rx_buffers - Allocates the Rx side skbs
2209  *  @nic:  device private variable
2210  *  @ring_no: ring number
2211  *  Description:
2212  *  The function allocates Rx side skbs and puts the physical
2213  *  address of these buffers into the RxD buffer pointers, so that the NIC
2214  *  can DMA the received frame into these locations.
2215  *  The NIC supports 3 receive modes, viz
2216  *  1. single buffer,
2217  *  2. three buffer and
2218  *  3. Five buffer modes.
2219  *  Each mode defines how many fragments the received frame will be split
2220  *  up into by the NIC. The frame is split into L3 header, L4 Header,
2221  *  L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2222  *  is split into 3 fragments. As of now only single buffer mode is
2223  *  supported.
2224  *   Return Value:
2225  *  SUCCESS on success or an appropriate -ve value on failure.
2226  */
2227
static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
{
        struct net_device *dev = nic->dev;
        struct sk_buff *skb;
        RxD_t *rxdp;
        int off, off1, size, block_no, block_no1;
        u32 alloc_tab = 0;
        u32 alloc_cnt;
        mac_info_t *mac_control;
        struct config_param *config;
        u64 tmp;
        buffAdd_t *ba;
#ifndef CONFIG_S2IO_NAPI
        unsigned long flags;
#endif
        RxD_t *first_rxdp = NULL;

        mac_control = &nic->mac_control;
        config = &nic->config;
        /* Number of descriptors on this ring that currently lack an skb. */
        alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
            atomic_read(&nic->rx_bufs_left[ring_no]);

        block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
        off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
        while (alloc_tab < alloc_cnt) {
                block_no = mac_control->rings[ring_no].rx_curr_put_info.
                    block_index;
                off = mac_control->rings[ring_no].rx_curr_put_info.offset;

                rxdp = mac_control->rings[ring_no].
                                rx_blocks[block_no].rxds[off].virt_addr;

                /* Put pointer has caught up with the get pointer while the
                 * slot is still occupied: ring is full, stop filling. */
                if ((block_no == block_no1) && (off == off1) &&
                                        (rxdp->Host_Control)) {
                        DBG_PRINT(INTR_DBG, "%s: Get and Put",
                                  dev->name);
                        DBG_PRINT(INTR_DBG, " info equated\n");
                        goto end;
                }
                /* End of the current block: advance (and possibly wrap) the
                 * put block index and restart at offset 0. */
                if (off && (off == rxd_count[nic->rxd_mode])) {
                        mac_control->rings[ring_no].rx_curr_put_info.
                            block_index++;
                        if (mac_control->rings[ring_no].rx_curr_put_info.
                            block_index == mac_control->rings[ring_no].
                                        block_count)
                                mac_control->rings[ring_no].rx_curr_put_info.
                                        block_index = 0;
                        block_no = mac_control->rings[ring_no].
                                        rx_curr_put_info.block_index;
                        if (off == rxd_count[nic->rxd_mode])
                                off = 0;
                        mac_control->rings[ring_no].rx_curr_put_info.
                                offset = off;
                        rxdp = mac_control->rings[ring_no].
                                rx_blocks[block_no].block_virt_addr;
                        DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
                                  dev->name, rxdp);
                }
#ifndef CONFIG_S2IO_NAPI
                /* Non-NAPI: publish the absolute put position (consumed by
                 * rx_intr_handler) under put_lock. */
                spin_lock_irqsave(&nic->put_lock, flags);
                mac_control->rings[ring_no].put_pos =
                    (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
                spin_unlock_irqrestore(&nic->put_lock, flags);
#endif
                /* Descriptor still owned by the NIC (and, in 2/3 buffer
                 * modes, flagged in-use via bit 0 of Control_2): stop. */
                if ((rxdp->Control_1 & RXD_OWN_XENA) &&
                        ((nic->rxd_mode >= RXD_MODE_3A) &&
                                (rxdp->Control_2 & BIT(0)))) {
                        mac_control->rings[ring_no].rx_curr_put_info.
                                        offset = off;
                        goto end;
                }
                /* calculate size of skb based on ring mode */
                size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
                                HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
                if (nic->rxd_mode == RXD_MODE_1)
                        size += NET_IP_ALIGN;
                else if (nic->rxd_mode == RXD_MODE_3B)
                        size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
                else
                        size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;

                /* allocate skb */
                skb = dev_alloc_skb(size);
                if(!skb) {
                        DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
                        DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
                        /* Hand the batch filled so far to the NIC before
                         * bailing out; the barrier orders the descriptor
                         * field writes ahead of the ownership transfer. */
                        if (first_rxdp) {
                                wmb();
                                first_rxdp->Control_1 |= RXD_OWN_XENA;
                        }
                        return -ENOMEM ;
                }
                if (nic->rxd_mode == RXD_MODE_1) {
                        /* 1 buffer mode - normal operation mode */
                        memset(rxdp, 0, sizeof(RxD1_t));
                        skb_reserve(skb, NET_IP_ALIGN);
                        ((RxD1_t*)rxdp)->Buffer0_ptr = pci_map_single
                            (nic->pdev, skb->data, size - NET_IP_ALIGN,
                                PCI_DMA_FROMDEVICE);
                        rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);

                } else if (nic->rxd_mode >= RXD_MODE_3A) {
                        /*
                         * 2 or 3 buffer mode -
                         * Both 2 buffer mode and 3 buffer mode provides 128
                         * byte aligned receive buffers.
                         *
                         * 3 buffer mode provides header separation where in
                         * skb->data will have L3/L4 headers where as
                         * skb_shinfo(skb)->frag_list will have the L4 data
                         * payload
                         */

                        memset(rxdp, 0, sizeof(RxD3_t));
                        ba = &mac_control->rings[ring_no].ba[block_no][off];
                        skb_reserve(skb, BUF0_LEN);
                        /* Round skb->data up to the next (ALIGN_SIZE + 1)
                         * boundary so the payload buffer is aligned. */
                        tmp = (u64)(unsigned long) skb->data;
                        tmp += ALIGN_SIZE;
                        tmp &= ~ALIGN_SIZE;
                        skb->data = (void *) (unsigned long)tmp;
                        skb->tail = (void *) (unsigned long)tmp;

                        ((RxD3_t*)rxdp)->Buffer0_ptr =
                            pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
                                           PCI_DMA_FROMDEVICE);
                        rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
                        if (nic->rxd_mode == RXD_MODE_3B) {
                                /* Two buffer mode */

                                /*
                                 * Buffer2 will have L3/L4 header plus 
                                 * L4 payload
                                 */
                                ((RxD3_t*)rxdp)->Buffer2_ptr = pci_map_single
                                (nic->pdev, skb->data, dev->mtu + 4,
                                                PCI_DMA_FROMDEVICE);

                                /* Buffer-1 will be dummy buffer not used */
                                ((RxD3_t*)rxdp)->Buffer1_ptr =
                                pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
                                        PCI_DMA_FROMDEVICE);
                                rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
                                rxdp->Control_2 |= SET_BUFFER2_SIZE_3
                                                                (dev->mtu + 4);
                        } else {
                                /* 3 buffer mode */
                                if (fill_rxd_3buf(nic, rxdp, skb) == -ENOMEM) {
                                        dev_kfree_skb_irq(skb);
                                        /* Publish any completed batch
                                         * before reporting the error. */
                                        if (first_rxdp) {
                                                wmb();
                                                first_rxdp->Control_1 |=
                                                        RXD_OWN_XENA;
                                        }
                                        return -ENOMEM ;
                                }
                        }
                        rxdp->Control_2 |= BIT(0);
                }
                rxdp->Host_Control = (unsigned long) (skb);
                /* Non-leader descriptors within a batch are handed to the
                 * NIC immediately; batch leaders are released below with a
                 * write barrier. */
                if (alloc_tab & ((1 << rxsync_frequency) - 1))
                        rxdp->Control_1 |= RXD_OWN_XENA;
                off++;
                if (off == (rxd_count[nic->rxd_mode] + 1))
                        off = 0;
                mac_control->rings[ring_no].rx_curr_put_info.offset = off;

                rxdp->Control_2 |= SET_RXD_MARKER;
                /* Every 2^rxsync_frequency descriptors: release the
                 * previous batch leader and make this one the new leader. */
                if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
                        if (first_rxdp) {
                                wmb();
                                first_rxdp->Control_1 |= RXD_OWN_XENA;
                        }
                        first_rxdp = rxdp;
                }
                atomic_inc(&nic->rx_bufs_left[ring_no]);
                alloc_tab++;
        }

      end:
        /* Transfer ownership of first descriptor to adapter just before
         * exiting. Before that, use memory barrier so that ownership
         * and other fields are seen by adapter correctly.
         */
        if (first_rxdp) {
                wmb();
                first_rxdp->Control_1 |= RXD_OWN_XENA;
        }

        return SUCCESS;
}
2418
2419 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2420 {
2421         struct net_device *dev = sp->dev;
2422         int j;
2423         struct sk_buff *skb;
2424         RxD_t *rxdp;
2425         mac_info_t *mac_control;
2426         buffAdd_t *ba;
2427
2428         mac_control = &sp->mac_control;
2429         for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2430                 rxdp = mac_control->rings[ring_no].
2431                                 rx_blocks[blk].rxds[j].virt_addr;
2432                 skb = (struct sk_buff *)
2433                         ((unsigned long) rxdp->Host_Control);
2434                 if (!skb) {
2435                         continue;
2436                 }
2437                 if (sp->rxd_mode == RXD_MODE_1) {
2438                         pci_unmap_single(sp->pdev, (dma_addr_t)
2439                                  ((RxD1_t*)rxdp)->Buffer0_ptr,
2440                                  dev->mtu +
2441                                  HEADER_ETHERNET_II_802_3_SIZE
2442                                  + HEADER_802_2_SIZE +
2443                                  HEADER_SNAP_SIZE,
2444                                  PCI_DMA_FROMDEVICE);
2445                         memset(rxdp, 0, sizeof(RxD1_t));
2446                 } else if(sp->rxd_mode == RXD_MODE_3B) {
2447                         ba = &mac_control->rings[ring_no].
2448                                 ba[blk][j];
2449                         pci_unmap_single(sp->pdev, (dma_addr_t)
2450                                  ((RxD3_t*)rxdp)->Buffer0_ptr,
2451                                  BUF0_LEN,
2452                                  PCI_DMA_FROMDEVICE);
2453                         pci_unmap_single(sp->pdev, (dma_addr_t)
2454                                  ((RxD3_t*)rxdp)->Buffer1_ptr,
2455                                  BUF1_LEN,
2456                                  PCI_DMA_FROMDEVICE);
2457                         pci_unmap_single(sp->pdev, (dma_addr_t)
2458                                  ((RxD3_t*)rxdp)->Buffer2_ptr,
2459                                  dev->mtu + 4,
2460                                  PCI_DMA_FROMDEVICE);
2461                         memset(rxdp, 0, sizeof(RxD3_t));
2462                 } else {
2463                         pci_unmap_single(sp->pdev, (dma_addr_t)
2464                                 ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN,
2465                                 PCI_DMA_FROMDEVICE);
2466                         pci_unmap_single(sp->pdev, (dma_addr_t)
2467                                 ((RxD3_t*)rxdp)->Buffer1_ptr, 
2468                                 l3l4hdr_size + 4,
2469                                 PCI_DMA_FROMDEVICE);
2470                         pci_unmap_single(sp->pdev, (dma_addr_t)
2471                                 ((RxD3_t*)rxdp)->Buffer2_ptr, dev->mtu,
2472                                 PCI_DMA_FROMDEVICE);
2473                         memset(rxdp, 0, sizeof(RxD3_t));
2474                 }
2475                 dev_kfree_skb(skb);
2476                 atomic_dec(&sp->rx_bufs_left[ring_no]);
2477         }
2478 }
2479
2480 /**
2481  *  free_rx_buffers - Frees all Rx buffers
2482  *  @sp: device private variable.
2483  *  Description:
2484  *  This function will free all Rx buffers allocated by host.
2485  *  Return Value:
2486  *  NONE.
2487  */
2488
2489 static void free_rx_buffers(struct s2io_nic *sp)
2490 {
2491         struct net_device *dev = sp->dev;
2492         int i, blk = 0, buf_cnt = 0;
2493         mac_info_t *mac_control;
2494         struct config_param *config;
2495
2496         mac_control = &sp->mac_control;
2497         config = &sp->config;
2498
2499         for (i = 0; i < config->rx_ring_num; i++) {
2500                 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2501                         free_rxd_blk(sp,i,blk);
2502
2503                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2504                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2505                 mac_control->rings[i].rx_curr_put_info.offset = 0;
2506                 mac_control->rings[i].rx_curr_get_info.offset = 0;
2507                 atomic_set(&sp->rx_bufs_left[i], 0);
2508                 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2509                           dev->name, buf_cnt, i);
2510         }
2511 }
2512
2513 /**
2514  * s2io_poll - Rx interrupt handler for NAPI support
2515  * @dev : pointer to the device structure.
2516  * @budget : The number of packets that were budgeted to be processed
 * during one pass through the "Poll" function.
 * Description:
 * Comes into picture only if NAPI support has been incorporated. It does
 * the same thing that rx_intr_handler does, but not in an interrupt context;
 * also, it will process only a given number of packets.
2522  * Return value:
2523  * 0 on success and 1 if there are No Rx packets to be processed.
2524  */
2525
2526 #if defined(CONFIG_S2IO_NAPI)
static int s2io_poll(struct net_device *dev, int *budget)
{
        nic_t *nic = dev->priv;
        int pkt_cnt = 0, org_pkts_to_process;
        mac_info_t *mac_control;
        struct config_param *config;
        XENA_dev_config_t __iomem *bar0 = nic->bar0;
        u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
        int i;

        atomic_inc(&nic->isr_cnt);
        mac_control = &nic->mac_control;
        config = &nic->config;

        /* Quota for this poll is the smaller of the NAPI budget and the
         * device quota. */
        nic->pkts_to_process = *budget;
        if (nic->pkts_to_process > dev->quota)
                nic->pkts_to_process = dev->quota;
        org_pkts_to_process = nic->pkts_to_process;

        /* Acknowledge all pending Rx traffic interrupt bits; the read-back
         * is presumably a PIO-write flush — TODO confirm against hardware
         * docs. */
        writeq(val64, &bar0->rx_traffic_int);
        val64 = readl(&bar0->rx_traffic_int);

        for (i = 0; i < config->rx_ring_num; i++) {
                rx_intr_handler(&mac_control->rings[i]);
                /* pkts_to_process is decremented by rx_intr_handler. */
                pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
                if (!nic->pkts_to_process) {
                        /* Quota for the current iteration has been met */
                        goto no_rx;
                }
        }
        if (!pkt_cnt)
                pkt_cnt = 1;

        dev->quota -= pkt_cnt;
        *budget -= pkt_cnt;
        /* All work done: leave the poll list. */
        netif_rx_complete(dev);

        /* Replenish the Rx descriptors consumed above. */
        for (i = 0; i < config->rx_ring_num; i++) {
                if (fill_rx_buffers(nic, i) == -ENOMEM) {
                        DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
                        DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
                        break;
                }
        }
        /* Re enable the Rx interrupts. */
        en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS);
        atomic_dec(&nic->isr_cnt);
        return 0;

no_rx:
        /* Quota exhausted: account for the work done but stay on the poll
         * list (Rx interrupts remain disabled). */
        dev->quota -= pkt_cnt;
        *budget -= pkt_cnt;

        for (i = 0; i < config->rx_ring_num; i++) {
                if (fill_rx_buffers(nic, i) == -ENOMEM) {
                        DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
                        DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
                        break;
                }
        }
        atomic_dec(&nic->isr_cnt);
        return 1;
}
2590 #endif
2591
2592 /**
2593  *  rx_intr_handler - Rx interrupt handler
 *  @ring_data: pointer to the per-ring control structure to be serviced.
 *  Description:
 *  If the interrupt is because of a received frame or if the
 *  receive ring contains fresh, as yet un-processed frames, this function is
2598  *  called. It picks out the RxD at which place the last Rx processing had
2599  *  stopped and sends the skb to the OSM's Rx handler and then increments
2600  *  the offset.
2601  *  Return Value:
2602  *  NONE.
2603  */
static void rx_intr_handler(ring_info_t *ring_data)
{
        nic_t *nic = ring_data->nic;
        struct net_device *dev = (struct net_device *) nic->dev;
        int get_block, put_block, put_offset;
        rx_curr_get_info_t get_info, put_info;
        RxD_t *rxdp;
        struct sk_buff *skb;
#ifndef CONFIG_S2IO_NAPI
        int pkt_cnt = 0;
#endif
        int i;

        spin_lock(&nic->rx_lock);
        /* Do nothing while the adapter is being reset / brought down. */
        if (atomic_read(&nic->card_state) == CARD_DOWN) {
                DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n",
                          __FUNCTION__, dev->name);
                spin_unlock(&nic->rx_lock);
                return;
        }

        get_info = ring_data->rx_curr_get_info;
        get_block = get_info.block_index;
        put_info = ring_data->rx_curr_put_info;
        put_block = put_info.block_index;
        rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
#ifndef CONFIG_S2IO_NAPI
        /* Non-NAPI: put_pos is shared with fill_rx_buffers(), so read it
         * under put_lock. */
        spin_lock(&nic->put_lock);
        put_offset = ring_data->put_pos;
        spin_unlock(&nic->put_lock);
#else
        put_offset = (put_block * (rxd_count[nic->rxd_mode] + 1)) +
                put_info.offset;
#endif
        /* Walk descriptors the NIC has handed back to the host. */
        while (RXD_IS_UP2DT(rxdp)) {
                /* If your are next to put index then it's FIFO full condition */
                if ((get_block == put_block) &&
                    (get_info.offset + 1) == put_info.offset) {
                        DBG_PRINT(ERR_DBG, "%s: Ring Full\n",dev->name);
                        break;
                }
                skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
                if (skb == NULL) {
                        DBG_PRINT(ERR_DBG, "%s: The skb is ",
                                  dev->name);
                        DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
                        spin_unlock(&nic->rx_lock);
                        return;
                }
                /* Undo the DMA mappings made at fill time; sizes mirror
                 * those used by fill_rx_buffers() for each rxd mode. */
                if (nic->rxd_mode == RXD_MODE_1) {
                        pci_unmap_single(nic->pdev, (dma_addr_t)
                                 ((RxD1_t*)rxdp)->Buffer0_ptr,
                                 dev->mtu +
                                 HEADER_ETHERNET_II_802_3_SIZE +
                                 HEADER_802_2_SIZE +
                                 HEADER_SNAP_SIZE,
                                 PCI_DMA_FROMDEVICE);
                } else if (nic->rxd_mode == RXD_MODE_3B) {
                        pci_unmap_single(nic->pdev, (dma_addr_t)
                                 ((RxD3_t*)rxdp)->Buffer0_ptr,
                                 BUF0_LEN, PCI_DMA_FROMDEVICE);
                        pci_unmap_single(nic->pdev, (dma_addr_t)
                                 ((RxD3_t*)rxdp)->Buffer1_ptr,
                                 BUF1_LEN, PCI_DMA_FROMDEVICE);
                        pci_unmap_single(nic->pdev, (dma_addr_t)
                                 ((RxD3_t*)rxdp)->Buffer2_ptr,
                                 dev->mtu + 4,
                                 PCI_DMA_FROMDEVICE);
                } else {
                        pci_unmap_single(nic->pdev, (dma_addr_t)
                                         ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN,
                                         PCI_DMA_FROMDEVICE);
                        pci_unmap_single(nic->pdev, (dma_addr_t)
                                         ((RxD3_t*)rxdp)->Buffer1_ptr,
                                         l3l4hdr_size + 4,
                                         PCI_DMA_FROMDEVICE);
                        pci_unmap_single(nic->pdev, (dma_addr_t)
                                         ((RxD3_t*)rxdp)->Buffer2_ptr,
                                         dev->mtu, PCI_DMA_FROMDEVICE);
                }
                prefetch(skb->data);
                /* Hand the frame up the receive path. */
                rx_osm_handler(ring_data, rxdp);
                get_info.offset++;
                ring_data->rx_curr_get_info.offset = get_info.offset;
                rxdp = ring_data->rx_blocks[get_block].
                                rxds[get_info.offset].virt_addr;
                /* End of block reached: wrap to the next block. */
                if (get_info.offset == rxd_count[nic->rxd_mode]) {
                        get_info.offset = 0;
                        ring_data->rx_curr_get_info.offset = get_info.offset;
                        get_block++;
                        if (get_block == ring_data->block_count)
                                get_block = 0;
                        ring_data->rx_curr_get_info.block_index = get_block;
                        rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
                }

#ifdef CONFIG_S2IO_NAPI
                /* NAPI: stop once the poll quota is exhausted. */
                nic->pkts_to_process -= 1;
                if (!nic->pkts_to_process)
                        break;
#else
                pkt_cnt++;
                if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
                        break;
#endif
        }
        if (nic->lro) {
                /* Clear all LRO sessions before exiting */
                for (i=0; i<MAX_LRO_SESSIONS; i++) {
                        lro_t *lro = &nic->lro0_n[i];
                        if (lro->in_use) {
                                update_L3L4_header(nic, lro);
                                queue_rx_frame(lro->parent);
                                clear_lro_session(lro);
                        }
                }
        }

        spin_unlock(&nic->rx_lock);
}
2724
2725 /**
2726  *  tx_intr_handler - Transmit interrupt handler
 *  @fifo_data : pointer to the fifo control structure to be serviced
2728  *  Description:
2729  *  If an interrupt was raised to indicate DMA complete of the
2730  *  Tx packet, this function is called. It identifies the last TxD
2731  *  whose buffer was freed and frees all skbs whose data have already
2732  *  DMA'ed into the NICs internal memory.
2733  *  Return Value:
2734  *  NONE
2735  */
2736
static void tx_intr_handler(fifo_info_t *fifo_data)
{
        nic_t *nic = fifo_data->nic;
        struct net_device *dev = (struct net_device *) nic->dev;
        tx_curr_get_info_t get_info, put_info;
        struct sk_buff *skb;
        TxD_t *txdlp;

        get_info = fifo_data->tx_curr_get_info;
        put_info = fifo_data->tx_curr_put_info;
        txdlp = (TxD_t *) fifo_data->list_info[get_info.offset].
            list_virt_addr;
        /* Walk completed descriptors: stop when one is still owned by the
         * NIC, when we catch up with the put pointer, or on an empty slot. */
        while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
               (get_info.offset != put_info.offset) &&
               (txdlp->Host_Control)) {
                /* Check for TxD errors */
                if (txdlp->Control_1 & TXD_T_CODE) {
                        unsigned long long err;
                        err = txdlp->Control_1 & TXD_T_CODE;
                        /* T_CODE 0xA is reported as a link-loss abort;
                         * anything else is treated as a hard TxD error. */
                        if ((err >> 48) == 0xA) {
                                DBG_PRINT(TX_DBG, "TxD returned due \
to loss of link\n");
                        }
                        else {
                                DBG_PRINT(ERR_DBG, "***TxD error \
%llx\n", err);
                        }
                }

                /* Unmap the descriptor's buffers and recover the skb. */
                skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
                if (skb == NULL) {
                        DBG_PRINT(ERR_DBG, "%s: Null skb ",
                        __FUNCTION__);
                        DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
                        /* NOTE(review): this early return also skips the
                         * netif_wake_queue() below, so a stopped Tx queue
                         * stays stopped — confirm this is intended. */
                        return;
                }

                /* Updating the statistics block */
                nic->stats.tx_bytes += skb->len;
                dev_kfree_skb_irq(skb);

                /* Advance (and wrap) the get pointer past this TxDL. */
                get_info.offset++;
                if (get_info.offset == get_info.fifo_len + 1)
                        get_info.offset = 0;
                txdlp = (TxD_t *) fifo_data->list_info
                    [get_info.offset].list_virt_addr;
                fifo_data->tx_curr_get_info.offset =
                    get_info.offset;
        }

        /* Descriptors were reclaimed; restart the Tx queue if it had been
         * stopped for lack of descriptors. */
        spin_lock(&nic->tx_lock);
        if (netif_queue_stopped(dev))
                netif_wake_queue(dev);
        spin_unlock(&nic->tx_lock);
}
2792
2793 /**
 *  alarm_intr_handler - Alarm Interrupt handler
2795  *  @nic: device private variable
2796  *  Description: If the interrupt was neither because of Rx packet or Tx
2797  *  complete, this function is called. If the interrupt was to indicate
2798  *  a loss of link, the OSM link status handler is invoked for any other
2799  *  alarm interrupt the block that raised the interrupt is displayed
2800  *  and a H/W reset is issued.
2801  *  Return Value:
2802  *  NONE
2803 */
2804
static void alarm_intr_handler(struct s2io_nic *nic)
{
	struct net_device *dev = (struct net_device *) nic->dev;
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	register u64 val64 = 0, err_reg = 0;

	/* Handling link status change error Intr */
	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		err_reg = readq(&bar0->mac_rmac_err_reg);
		/* Write the read value back — presumably these are
		 * write-1-to-clear bits (pattern used for every alarm
		 * register below); TODO confirm against the Xena spec. */
		writeq(err_reg, &bar0->mac_rmac_err_reg);
		if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
			/* Defer link handling to process context. */
			schedule_work(&nic->set_link_task);
		}
	}

	/* Handling Ecc errors */
	val64 = readq(&bar0->mc_err_reg);
	writeq(val64, &bar0->mc_err_reg);
	if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
		if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
			nic->mac_control.stats_info->sw_stat.
				double_ecc_errs++;
			DBG_PRINT(INIT_DBG, "%s: Device indicates ",
				  dev->name);
			DBG_PRINT(INIT_DBG, "double ECC error!!\n");
			if (nic->device_type != XFRAME_II_DEVICE) {
				/* Reset XframeI only if critical error */
				if (val64 & (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
					     MC_ERR_REG_MIRI_ECC_DB_ERR_1)) {
					netif_stop_queue(dev);
					schedule_work(&nic->rst_timer_task);
				}
			}
		} else {
			/* Single-bit errors are only counted, no reset. */
			nic->mac_control.stats_info->sw_stat.
				single_ecc_errs++;
		}
	}

	/* In case of a serious error, the device will be Reset. */
	val64 = readq(&bar0->serr_source);
	if (val64 & SERR_SOURCE_ANY) {
		DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
		DBG_PRINT(ERR_DBG, "serious error %llx!!\n",
			  (unsigned long long)val64);
		netif_stop_queue(dev);
		schedule_work(&nic->rst_timer_task);
	}

	/*
	 * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
	 * Error occurs, the adapter will be recycled by disabling the
	 * adapter enable bit and enabling it again after the device
	 * becomes Quiescent.
	 */
	val64 = readq(&bar0->pcc_err_reg);
	writeq(val64, &bar0->pcc_err_reg);
	if (val64 & PCC_FB_ECC_DB_ERR) {
		u64 ac = readq(&bar0->adapter_control);
		ac &= ~(ADAPTER_CNTL_EN);
		writeq(ac, &bar0->adapter_control);
		/* Read back — NOTE(review): looks like a posted-write
		 * flush before handing over to set_link_task, which
		 * re-enables the adapter; confirm. */
		ac = readq(&bar0->adapter_control);
		schedule_work(&nic->set_link_task);
	}

	/* Other type of interrupts are not being handled now,  TODO */
}
2872
2873 /**
2874  *  wait_for_cmd_complete - waits for a command to complete.
2875  *  @sp : private member of the device structure, which is a pointer to the
2876  *  s2io_nic structure.
2877  *  Description: Function that waits for a command to Write into RMAC
2878  *  ADDR DATA registers to be completed and returns either success or
2879  *  error depending on whether the command was complete or not.
2880  *  Return value:
2881  *   SUCCESS on success and FAILURE on failure.
2882  */
2883
2884 static int wait_for_cmd_complete(nic_t * sp)
2885 {
2886         XENA_dev_config_t __iomem *bar0 = sp->bar0;
2887         int ret = FAILURE, cnt = 0;
2888         u64 val64;
2889
2890         while (TRUE) {
2891                 val64 = readq(&bar0->rmac_addr_cmd_mem);
2892                 if (!(val64 & RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
2893                         ret = SUCCESS;
2894                         break;
2895                 }
2896                 msleep(50);
2897                 if (cnt++ > 10)
2898                         break;
2899         }
2900
2901         return ret;
2902 }
2903
2904 /**
2905  *  s2io_reset - Resets the card.
2906  *  @sp : private member of the device structure.
2907  *  Description: Function to Reset the card. This function then also
2908  *  restores the previously saved PCI configuration space registers as
2909  *  the card reset also resets the configuration space.
2910  *  Return value:
2911  *  void.
2912  */
2913
static void s2io_reset(nic_t * sp)
{
	XENA_dev_config_t __iomem *bar0 = sp->bar0;
	u64 val64;
	u16 subid, pci_cmd;

	/* Back up  the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));

	/* Global software reset: wipes the card and its config space. */
	val64 = SW_RESET_ALL;
	writeq(val64, &bar0->sw_reset);

	/*
	 * At this stage, if the PCI write is indeed completed, the
	 * card is reset and so is the PCI Config space of the device.
	 * So a read cannot be issued at this stage on any of the
	 * registers to ensure the write into "sw_reset" register
	 * has gone through.
	 * Question: Is there any system call that will explicitly force
	 * all the write commands still pending on the bus to be pushed
	 * through?
	 * As of now I'am just giving a 250ms delay and hoping that the
	 * PCI write to sw_reset register is done by this time.
	 */
	msleep(250);

	/* Restore the PCI state saved during initialization. */
	pci_restore_state(sp->pdev);
	/* Re-apply the PCI-X command word captured before the reset. */
	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
				     pci_cmd);
	s2io_init_pci(sp);

	msleep(250);

	/* Set swapper to enable I/O register access */
	s2io_set_swapper(sp);

	/* Restore the MSIX table entries from local variables */
	restore_xmsi_data(sp);

	/* Clear certain PCI/PCI-X fields after reset */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Clear parity err detect bit */
		pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);

		/* Clearing PCIX Ecc status register */
		pci_write_config_dword(sp->pdev, 0x68, 0x7C);

		/* Clearing PCI_STATUS error reflected here */
		writeq(BIT(62), &bar0->txpic_int_reg);
	}

	/* Reset device statistics maintained by OS */
	memset(&sp->stats, 0, sizeof (struct net_device_stats));

	/* SXE-002: Configure link and activity LED to turn it off */
	subid = sp->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (sp->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		/* Magic sequence from the SXE-002 errata workaround. */
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	/*
	 * Clear spurious ECC interrupts that would have occured on
	 * XFRAME II cards after reset.
	 */
	if (sp->device_type == XFRAME_II_DEVICE) {
		val64 = readq(&bar0->pcc_err_reg);
		writeq(val64, &bar0->pcc_err_reg);
	}

	sp->device_enabled_once = FALSE;
}
2991
2992 /**
2993  *  s2io_set_swapper - to set the swapper controle on the card
2994  *  @sp : private member of the device structure,
2995  *  pointer to the s2io_nic structure.
2996  *  Description: Function to set the swapper control on the card
2997  *  correctly depending on the 'endianness' of the system.
2998  *  Return value:
2999  *  SUCCESS on success and FAILURE on failure.
3000  */
3001
static int s2io_set_swapper(nic_t * sp)
{
	struct net_device *dev = sp->dev;
	XENA_dev_config_t __iomem *bar0 = sp->bar0;
	u64 val64, valt, valr;

	/*
	 * Set proper endian settings and verify the same by reading
	 * the PIF Feed-back register.
	 */

	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		/*
		 * Feedback pattern wrong: try each candidate read-path
		 * swapper setting in turn until the feedback register
		 * reads back the expected 64-bit pattern.
		 */
		int i = 0;
		u64 value[] = { 0xC30000C3C30000C3ULL,   /* FE=1, SE=1 */
				0x8100008181000081ULL,  /* FE=1, SE=0 */
				0x4200004242000042ULL,  /* FE=0, SE=1 */
				0};                     /* FE=0, SE=0 */

		while(i<4) {
			writeq(value[i], &bar0->swapper_ctrl);
			val64 = readq(&bar0->pif_rd_swapper_fb);
			if (val64 == 0x0123456789ABCDEFULL)
				break;
			i++;
		}
		if (i == 4) {
			/* No candidate worked — register access is unusable. */
			DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
				dev->name);
			DBG_PRINT(ERR_DBG, "feedback read %llx\n",
				(unsigned long long) val64);
			return FAILURE;
		}
		valr = value[i];
	} else {
		/* Read path already correct; keep the current setting. */
		valr = readq(&bar0->swapper_ctrl);
	}

	/* Now probe the write path with a known pattern via xmsi_address. */
	valt = 0x0123456789ABCDEFULL;
	writeq(valt, &bar0->xmsi_address);
	val64 = readq(&bar0->xmsi_address);

	if(val64 != valt) {
		/* Try the candidate write-path settings, OR-ed with the
		 * verified read-path setting found above. */
		int i = 0;
		u64 value[] = { 0x00C3C30000C3C300ULL,  /* FE=1, SE=1 */
				0x0081810000818100ULL,  /* FE=1, SE=0 */
				0x0042420000424200ULL,  /* FE=0, SE=1 */
				0};                     /* FE=0, SE=0 */

		while(i<4) {
			writeq((value[i] | valr), &bar0->swapper_ctrl);
			writeq(valt, &bar0->xmsi_address);
			val64 = readq(&bar0->xmsi_address);
			if(val64 == valt)
				break;
			i++;
		}
		if(i == 4) {
			unsigned long long x = val64;
			DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
			DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
			return FAILURE;
		}
	}
	/* Keep only the top 16 bits (the probed FE/SE fields) and then
	 * set the per-path swap enables explicitly below. */
	val64 = readq(&bar0->swapper_ctrl);
	val64 &= 0xFFFF000000000000ULL;

#ifdef  __BIG_ENDIAN
	/*
	 * The device by default set to a big endian format, so a
	 * big endian driver need not set anything.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		 SWAPPER_CTRL_TXP_SE |
		 SWAPPER_CTRL_TXD_R_FE |
		 SWAPPER_CTRL_TXD_W_FE |
		 SWAPPER_CTRL_TXF_R_FE |
		 SWAPPER_CTRL_RXD_R_FE |
		 SWAPPER_CTRL_RXD_W_FE |
		 SWAPPER_CTRL_RXF_W_FE |
		 SWAPPER_CTRL_XMSI_FE |
		 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
	if (sp->intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#else
	/*
	 * Initially we enable all bits to make it accessible by the
	 * driver, then we selectively enable only those bits that
	 * we want to set.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		 SWAPPER_CTRL_TXP_SE |
		 SWAPPER_CTRL_TXD_R_FE |
		 SWAPPER_CTRL_TXD_R_SE |
		 SWAPPER_CTRL_TXD_W_FE |
		 SWAPPER_CTRL_TXD_W_SE |
		 SWAPPER_CTRL_TXF_R_FE |
		 SWAPPER_CTRL_RXD_R_FE |
		 SWAPPER_CTRL_RXD_R_SE |
		 SWAPPER_CTRL_RXD_W_FE |
		 SWAPPER_CTRL_RXD_W_SE |
		 SWAPPER_CTRL_RXF_W_FE |
		 SWAPPER_CTRL_XMSI_FE |
		 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
	if (sp->intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#endif
	val64 = readq(&bar0->swapper_ctrl);

	/*
	 * Verifying if endian settings are accurate by reading a
	 * feedback register.
	 */
	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		/* Endian settings are incorrect, calls for another dekko. */
		DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
			  dev->name);
		DBG_PRINT(ERR_DBG, "feedback read %llx\n",
			  (unsigned long long) val64);
		return FAILURE;
	}

	return SUCCESS;
}
3129
3130 static int wait_for_msix_trans(nic_t *nic, int i)
3131 {
3132         XENA_dev_config_t __iomem *bar0 = nic->bar0;
3133         u64 val64;
3134         int ret = 0, cnt = 0;
3135
3136         do {
3137                 val64 = readq(&bar0->xmsi_access);
3138                 if (!(val64 & BIT(15)))
3139                         break;
3140                 mdelay(1);
3141                 cnt++;
3142         } while(cnt < 5);
3143         if (cnt == 5) {
3144                 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3145                 ret = 1;
3146         }
3147
3148         return ret;
3149 }
3150
3151 static void restore_xmsi_data(nic_t *nic)
3152 {
3153         XENA_dev_config_t __iomem *bar0 = nic->bar0;
3154         u64 val64;
3155         int i;
3156
3157         for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
3158                 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3159                 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3160                 val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
3161                 writeq(val64, &bar0->xmsi_access);
3162                 if (wait_for_msix_trans(nic, i)) {
3163                         DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3164                         continue;
3165                 }
3166         }
3167 }
3168
3169 static void store_xmsi_data(nic_t *nic)
3170 {
3171         XENA_dev_config_t __iomem *bar0 = nic->bar0;
3172         u64 val64, addr, data;
3173         int i;
3174
3175         /* Store and display */
3176         for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
3177                 val64 = (BIT(15) | vBIT(i, 26, 6));
3178                 writeq(val64, &bar0->xmsi_access);
3179                 if (wait_for_msix_trans(nic, i)) {
3180                         DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3181                         continue;
3182                 }
3183                 addr = readq(&bar0->xmsi_address);
3184                 data = readq(&bar0->xmsi_data);
3185                 if (addr && data) {
3186                         nic->msix_info[i].addr = addr;
3187                         nic->msix_info[i].data = data;
3188                 }
3189         }
3190 }
3191
3192 int s2io_enable_msi(nic_t *nic)
3193 {
3194         XENA_dev_config_t __iomem *bar0 = nic->bar0;
3195         u16 msi_ctrl, msg_val;
3196         struct config_param *config = &nic->config;
3197         struct net_device *dev = nic->dev;
3198         u64 val64, tx_mat, rx_mat;
3199         int i, err;
3200
3201         val64 = readq(&bar0->pic_control);
3202         val64 &= ~BIT(1);
3203         writeq(val64, &bar0->pic_control);
3204
3205         err = pci_enable_msi(nic->pdev);
3206         if (err) {
3207                 DBG_PRINT(ERR_DBG, "%s: enabling MSI failed\n",
3208                           nic->dev->name);
3209                 return err;
3210         }
3211
3212         /*
3213          * Enable MSI and use MSI-1 in stead of the standard MSI-0
3214          * for interrupt handling.
3215          */
3216         pci_read_config_word(nic->pdev, 0x4c, &msg_val);
3217         msg_val ^= 0x1;
3218         pci_write_config_word(nic->pdev, 0x4c, msg_val);
3219         pci_read_config_word(nic->pdev, 0x4c, &msg_val);
3220
3221         pci_read_config_word(nic->pdev, 0x42, &msi_ctrl);
3222         msi_ctrl |= 0x10;
3223         pci_write_config_word(nic->pdev, 0x42, msi_ctrl);
3224
3225         /* program MSI-1 into all usable Tx_Mat and Rx_Mat fields */
3226         tx_mat = readq(&bar0->tx_mat0_n[0]);
3227         for (i=0; i<config->tx_fifo_num; i++) {
3228                 tx_mat |= TX_MAT_SET(i, 1);
3229         }
3230         writeq(tx_mat, &bar0->tx_mat0_n[0]);
3231
3232         rx_mat = readq(&bar0->rx_mat);
3233         for (i=0; i<config->rx_ring_num; i++) {
3234                 rx_mat |= RX_MAT_SET(i, 1);
3235         }
3236         writeq(rx_mat, &bar0->rx_mat);
3237
3238         dev->irq = nic->pdev->irq;
3239         return 0;
3240 }
3241
3242 static int s2io_enable_msi_x(nic_t *nic)
3243 {
3244         XENA_dev_config_t __iomem *bar0 = nic->bar0;
3245         u64 tx_mat, rx_mat;
3246         u16 msi_control; /* Temp variable */
3247         int ret, i, j, msix_indx = 1;
3248
3249         nic->entries = kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct msix_entry),
3250                                GFP_KERNEL);
3251         if (nic->entries == NULL) {
3252                 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", __FUNCTION__);
3253                 return -ENOMEM;
3254         }
3255         memset(nic->entries, 0, MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3256
3257         nic->s2io_entries =
3258                 kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry),
3259                                    GFP_KERNEL);
3260         if (nic->s2io_entries == NULL) {
3261                 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", __FUNCTION__);
3262                 kfree(nic->entries);
3263                 return -ENOMEM;
3264         }
3265         memset(nic->s2io_entries, 0,
3266                MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3267
3268         for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
3269                 nic->entries[i].entry = i;
3270                 nic->s2io_entries[i].entry = i;
3271                 nic->s2io_entries[i].arg = NULL;
3272                 nic->s2io_entries[i].in_use = 0;
3273         }
3274
3275         tx_mat = readq(&bar0->tx_mat0_n[0]);
3276         for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) {
3277                 tx_mat |= TX_MAT_SET(i, msix_indx);
3278                 nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
3279                 nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
3280                 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3281         }
3282         writeq(tx_mat, &bar0->tx_mat0_n[0]);
3283
3284         if (!nic->config.bimodal) {
3285                 rx_mat = readq(&bar0->rx_mat);
3286                 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
3287                         rx_mat |= RX_MAT_SET(j, msix_indx);
3288                         nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
3289                         nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3290                         nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3291                 }
3292                 writeq(rx_mat, &bar0->rx_mat);
3293         } else {
3294                 tx_mat = readq(&bar0->tx_mat0_n[7]);
3295                 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
3296                         tx_mat |= TX_MAT_SET(i, msix_indx);
3297                         nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
3298                         nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3299                         nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3300                 }
3301                 writeq(tx_mat, &bar0->tx_mat0_n[7]);
3302         }
3303
3304         ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
3305         if (ret) {
3306                 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
3307                 kfree(nic->entries);
3308                 kfree(nic->s2io_entries);
3309                 nic->entries = NULL;
3310                 nic->s2io_entries = NULL;
3311                 return -ENOMEM;
3312         }
3313
3314         /*
3315          * To enable MSI-X, MSI also needs to be enabled, due to a bug
3316          * in the herc NIC. (Temp change, needs to be removed later)
3317          */
3318         pci_read_config_word(nic->pdev, 0x42, &msi_control);
3319         msi_control |= 0x1; /* Enable MSI */
3320         pci_write_config_word(nic->pdev, 0x42, msi_control);
3321
3322         return 0;
3323 }
3324
3325 /* ********************************************************* *
3326  * Functions defined below concern the OS part of the driver *
3327  * ********************************************************* */
3328
3329 /**
3330  *  s2io_open - open entry point of the driver
3331  *  @dev : pointer to the device structure.
3332  *  Description:
3333  *  This function is the open entry point of the driver. It mainly calls a
3334  *  function to allocate Rx buffers and inserts them into the buffer
3335  *  descriptors and then enables the Rx part of the NIC.
3336  *  Return value:
3337  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3338  *   file on failure.
3339  */
3340
3341 static int s2io_open(struct net_device *dev)
3342 {
3343         nic_t *sp = dev->priv;
3344         int err = 0;
3345         int i;
3346         u16 msi_control; /* Temp variable */
3347
3348         /*
3349          * Make sure you have link off by default every time
3350          * Nic is initialized
3351          */
3352         netif_carrier_off(dev);
3353         sp->last_link_state = 0;
3354
3355         /* Initialize H/W and enable interrupts */
3356         if (s2io_card_up(sp)) {
3357                 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3358                           dev->name);
3359                 err = -ENODEV;
3360                 goto hw_init_failed;
3361         }
3362
3363         /* Store the values of the MSIX table in the nic_t structure */
3364         store_xmsi_data(sp);
3365
3366         /* After proper initialization of H/W, register ISR */
3367         if (sp->intr_type == MSI) {
3368                 err = request_irq((int) sp->pdev->irq, s2io_msi_handle, 
3369                         SA_SHIRQ, sp->name, dev);
3370                 if (err) {
3371                         DBG_PRINT(ERR_DBG, "%s: MSI registration \
3372 failed\n", dev->name);
3373                         goto isr_registration_failed;
3374                 }
3375         }
3376         if (sp->intr_type == MSI_X) {
3377                 for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
3378                         if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
3379                                 sprintf(sp->desc1, "%s:MSI-X-%d-TX",
3380                                         dev->name, i);
3381                                 err = request_irq(sp->entries[i].vector,
3382                                           s2io_msix_fifo_handle, 0, sp->desc1,
3383                                           sp->s2io_entries[i].arg);
3384                                 DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc1, 
3385                                     (unsigned long long)sp->msix_info[i].addr);
3386                         } else {
3387                                 sprintf(sp->desc2, "%s:MSI-X-%d-RX",
3388                                         dev->name, i);
3389                                 err = request_irq(sp->entries[i].vector,
3390                                           s2io_msix_ring_handle, 0, sp->desc2,
3391                                           sp->s2io_entries[i].arg);
3392                                 DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc2, 
3393                                      (unsigned long long)sp->msix_info[i].addr);
3394                         }
3395                         if (err) {
3396                                 DBG_PRINT(ERR_DBG, "%s: MSI-X-%d registration \
3397 failed\n", dev->name, i);
3398                                 DBG_PRINT(ERR_DBG, "Returned: %d\n", err);
3399                                 goto isr_registration_failed;
3400                         }
3401                         sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
3402                 }
3403         }
3404         if (sp->intr_type == INTA) {
3405                 err = request_irq((int) sp->pdev->irq, s2io_isr, SA_SHIRQ,
3406                                 sp->name, dev);
3407                 if (err) {
3408                         DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
3409                                   dev->name);
3410                         goto isr_registration_failed;
3411                 }
3412         }
3413
3414         if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
3415                 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3416                 err = -ENODEV;
3417                 goto setting_mac_address_failed;
3418         }
3419
3420         netif_start_queue(dev);
3421         return 0;
3422
3423 setting_mac_address_failed:
3424         if (sp->intr_type != MSI_X)
3425                 free_irq(sp->pdev->irq, dev);
3426 isr_registration_failed:
3427         del_timer_sync(&sp->alarm_timer);
3428         if (sp->intr_type == MSI_X) {
3429                 if (sp->device_type == XFRAME_II_DEVICE) {
3430                         for (i=1; (sp->s2io_entries[i].in_use == 
3431                                 MSIX_REGISTERED_SUCCESS); i++) {
3432                                 int vector = sp->entries[i].vector;
3433                                 void *arg = sp->s2io_entries[i].arg;
3434
3435                                 free_irq(vector, arg);
3436                         }
3437                         pci_disable_msix(sp->pdev);
3438
3439                         /* Temp */
3440                         pci_read_config_word(sp->pdev, 0x42, &msi_control);
3441                         msi_control &= 0xFFFE; /* Disable MSI */
3442                         pci_write_config_word(sp->pdev, 0x42, msi_control);
3443                 }
3444         }
3445         else if (sp->intr_type == MSI)
3446                 pci_disable_msi(sp->pdev);
3447         s2io_reset(sp);
3448 hw_init_failed:
3449         if (sp->intr_type == MSI_X) {
3450                 if (sp->entries)
3451                         kfree(sp->entries);
3452                 if (sp->s2io_entries)
3453                         kfree(sp->s2io_entries);
3454         }
3455         return err;
3456 }
3457
3458 /**
3459  *  s2io_close -close entry point of the driver
3460  *  @dev : device pointer.
3461  *  Description:
3462  *  This is the stop entry point of the driver. It needs to undo exactly
3463  *  whatever was done by the open entry point,thus it's usually referred to
3464  *  as the close function.Among other things this function mainly stops the
3465  *  Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3466  *  Return value:
3467  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3468  *  file on failure.
3469  */
3470
static int s2io_close(struct net_device *dev)
{
	nic_t *sp = dev->priv;
	int i;
	u16 msi_control;

	/* Finish any pending deferred work (link/reset tasks) first. */
	flush_scheduled_work();
	netif_stop_queue(dev);
	/* Reset card, kill tasklet and free Tx and Rx buffers. */
	s2io_card_down(sp);

	if (sp->intr_type == MSI_X) {
		if (sp->device_type == XFRAME_II_DEVICE) {
			/* Free every vector that s2io_open() registered
			 * (entries marked MSIX_REGISTERED_SUCCESS). */
			for (i=1; (sp->s2io_entries[i].in_use == 
					MSIX_REGISTERED_SUCCESS); i++) {
				int vector = sp->entries[i].vector;
				void *arg = sp->s2io_entries[i].arg;

				free_irq(vector, arg);
			}
			/* Undo the MSI enable done as a herc workaround in
			 * s2io_enable_msi_x(). */
			pci_read_config_word(sp->pdev, 0x42, &msi_control);
			msi_control &= 0xFFFE; /* Disable MSI */
			pci_write_config_word(sp->pdev, 0x42, msi_control);

			pci_disable_msix(sp->pdev);
		}
	}
	else {
		/* INTA and MSI share a single irq registered against dev. */
		free_irq(sp->pdev->irq, dev);
		if (sp->intr_type == MSI)
			pci_disable_msi(sp->pdev);
	}
	sp->device_close_flag = TRUE;	/* Device is shut down. */
	return 0;
}
3506
3507 /**
 *  s2io_xmit - Tx entry point of the driver
3509  *  @skb : the socket buffer containing the Tx data.
3510  *  @dev : device pointer.
3511  *  Description :
3512  *  This function is the Tx entry point of the driver. S2IO NIC supports
3513  *  certain protocol assist features on Tx side, namely  CSO, S/G, LSO.
 *  NOTE: when the device can't queue the pkt, just the trans_start variable
 *  will not be updated.
3516  *  Return value:
3517  *  0 on success & 1 on failure.
3518  */
3519
3520 static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3521 {
3522         nic_t *sp = dev->priv;
3523         u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
3524         register u64 val64;
3525         TxD_t *txdp;
3526         TxFIFO_element_t __iomem *tx_fifo;
3527         unsigned long flags;
3528 #ifdef NETIF_F_TSO
3529         int mss;
3530 #endif
3531         u16 vlan_tag = 0;
3532         int vlan_priority = 0;
3533         mac_info_t *mac_control;
3534         struct config_param *config;
3535
3536         mac_control = &sp->mac_control;
3537         config = &sp->config;
3538
3539         DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
3540         spin_lock_irqsave(&sp->tx_lock, flags);
             /* Silently drop packets while the adapter is down for reset;
              * returning 0 tells the stack the skb was consumed. */
3541         if (atomic_read(&sp->card_state) == CARD_DOWN) {
3542                 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
3543                           dev->name);
3544                 spin_unlock_irqrestore(&sp->tx_lock, flags);
3545                 dev_kfree_skb(skb);
3546                 return 0;
3547         }
3548
3549         queue = 0;
3550
3551         /* Get Fifo number to Transmit based on vlan priority */
3552         if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3553                 vlan_tag = vlan_tx_tag_get(skb);
                     /* 802.1p priority lives in the top 3 bits of the tag. */
3554                 vlan_priority = vlan_tag >> 13;
3555                 queue = config->fifo_mapping[vlan_priority];
3556         }
3557
3558         put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
3559         get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
3560         txdp = (TxD_t *) mac_control->fifos[queue].list_info[put_off].
3561                 list_virt_addr;
3562
3563         queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
3564         /* Avoid "put" pointer going beyond "get" pointer */
             /* Host_Control != 0 means the descriptor still holds an skb
              * whose completion has not been reaped; the ring is full. */
3565         if (txdp->Host_Control ||
3566                    ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
3567                 DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
3568                 netif_stop_queue(dev);
3569                 dev_kfree_skb(skb);
3570                 spin_unlock_irqrestore(&sp->tx_lock, flags);
3571                 return 0;
3572         }
3573
3574         /* A buffer with no data will be dropped */
3575         if (!skb->len) {
3576                 DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
3577                 dev_kfree_skb(skb);
3578                 spin_unlock_irqrestore(&sp->tx_lock, flags);
3579                 return 0;
3580         }
3581
             /* Build the first descriptor: offload flags, ownership,
              * interrupt type and (optionally) the VLAN tag. */
3582         txdp->Control_1 = 0;
3583         txdp->Control_2 = 0;
3584 #ifdef NETIF_F_TSO
3585         mss = skb_shinfo(skb)->tso_size;
3586         if (mss) {
3587                 txdp->Control_1 |= TXD_TCP_LSO_EN;
3588                 txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
3589         }
3590 #endif
3591         if (skb->ip_summed == CHECKSUM_HW) {
3592                 txdp->Control_2 |=
3593                     (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
3594                      TXD_TX_CKO_UDP_EN);
3595         }
3596         txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
3597         txdp->Control_1 |= TXD_LIST_OWN_XENA;
3598         txdp->Control_2 |= config->tx_intr_type;
3599
3600         if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3601                 txdp->Control_2 |= TXD_VLAN_ENABLE;
3602                 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
3603         }
3604
             /* Linear (non-paged) portion length of the skb. */
3605         frg_len = skb->len - skb->data_len;
             /* UFO: descriptor 0 carries an 8-byte in-band header holding
              * the IPv6 fragment id; the packet data then starts at the
              * next descriptor. */
3606         if (skb_shinfo(skb)->ufo_size) {
3607                 int ufo_size;
3608
3609                 ufo_size = skb_shinfo(skb)->ufo_size;
3610                 ufo_size &= ~7;
3611                 txdp->Control_1 |= TXD_UFO_EN;
3612                 txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
3613                 txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
3614 #ifdef __BIG_ENDIAN
3615                 sp->ufo_in_band_v[put_off] =
3616                                 (u64)skb_shinfo(skb)->ip6_frag_id;
3617 #else
3618                 sp->ufo_in_band_v[put_off] =
3619                                 (u64)skb_shinfo(skb)->ip6_frag_id << 32;
3620 #endif
3621                 txdp->Host_Control = (unsigned long)sp->ufo_in_band_v;
3622                 txdp->Buffer_Pointer = pci_map_single(sp->pdev,
3623                                         sp->ufo_in_band_v,
3624                                         sizeof(u64), PCI_DMA_TODEVICE);
3625                 txdp++;
3626                 txdp->Control_1 = 0;
3627                 txdp->Control_2 = 0;
3628         }
3629
             /* Map the linear part; stash the skb pointer in Host_Control
              * so the Tx-completion handler can free it later. */
3630         txdp->Buffer_Pointer = pci_map_single
3631             (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
3632         txdp->Host_Control = (unsigned long) skb;
3633         txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
3634
3635         if (skb_shinfo(skb)->ufo_size)
3636                 txdp->Control_1 |= TXD_UFO_EN;
3637
3638         frg_cnt = skb_shinfo(skb)->nr_frags;
3639         /* For fragmented SKB. */
3640         for (i = 0; i < frg_cnt; i++) {
3641                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3642                 /* A '0' length fragment will be ignored */
3643                 if (!frag->size)
3644                         continue;
3645                 txdp++;
3646                 txdp->Buffer_Pointer = (u64) pci_map_page
3647                     (sp->pdev, frag->page, frag->page_offset,
3648                      frag->size, PCI_DMA_TODEVICE);
3649                 txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
3650                 if (skb_shinfo(skb)->ufo_size)
3651                         txdp->Control_1 |= TXD_UFO_EN;
3652         }
3653         txdp->Control_1 |= TXD_GATHER_CODE_LAST;
3654
3655         if (skb_shinfo(skb)->ufo_size)
3656                 frg_cnt++; /* as Txd0 was used for inband header */
3657
             /* Hand the descriptor list to the NIC: write its physical
              * address, then the list-control word that kicks the DMA. */
3658         tx_fifo = mac_control->tx_FIFO_start[queue];
3659         val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
3660         writeq(val64, &tx_fifo->TxDL_Pointer);
3661
3662         val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
3663                  TX_FIFO_LAST_LIST);
3664
3665 #ifdef NETIF_F_TSO
3666         if (mss)
3667                 val64 |= TX_FIFO_SPECIAL_FUNC;
3668 #endif
3669         if (skb_shinfo(skb)->ufo_size)
3670                 val64 |= TX_FIFO_SPECIAL_FUNC;
3671         writeq(val64, &tx_fifo->List_Control);
3672
             /* Flush posted MMIO writes before dropping the lock. */
3673         mmiowb();
3674
             /* Advance the put pointer, wrapping at the end of the ring. */
3675         put_off++;
3676         if (put_off == mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1)
3677                 put_off = 0;
3678         mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
3679
3680         /* Avoid "put" pointer going beyond "get" pointer */
3681         if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
3682                 DBG_PRINT(TX_DBG,
3683                           "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
3684                           put_off, get_off);
3685                 netif_stop_queue(dev);
3686         }
3687
3688         dev->trans_start = jiffies;
3689         spin_unlock_irqrestore(&sp->tx_lock, flags);
3690
3691         return 0;
3692 }
3693
3694 static void
     /* Timer callback: service pending alarm/error conditions, then
      * re-arm the timer so alarms are polled roughly twice a second. */
3695 s2io_alarm_handle(unsigned long data)
3696 {
3697         nic_t *sp = (nic_t *)data;
3698
3699         alarm_intr_handler(sp);
3700         mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
3701 }
3702
3703 static irqreturn_t
     /* MSI interrupt handler: one vector for everything, so service all
      * Rx rings and Tx FIFOs unconditionally, then replenish Rx buffers. */
3704 s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs)
3705 {
3706         struct net_device *dev = (struct net_device *) dev_id;
3707         nic_t *sp = dev->priv;
3708         int i;
3709         int ret;
3710         mac_info_t *mac_control;
3711         struct config_param *config;
3712
             /* Track in-flight ISRs; every return path must decrement. */
3713         atomic_inc(&sp->isr_cnt);
3714         mac_control = &sp->mac_control;
3715         config = &sp->config;
3716         DBG_PRINT(INTR_DBG, "%s: MSI handler\n", __FUNCTION__);
3717
3718         /* If Intr is because of Rx Traffic */
3719         for (i = 0; i < config->rx_ring_num; i++)
3720                 rx_intr_handler(&mac_control->rings[i]);
3721
3722         /* If Intr is because of Tx Traffic */
3723         for (i = 0; i < config->tx_fifo_num; i++)
3724                 tx_intr_handler(&mac_control->fifos[i]);
3725
3726         /*
3727          * If the Rx buffer count is below the panic threshold then
3728          * reallocate the buffers from the interrupt handler itself,
3729          * else schedule a tasklet to reallocate the buffers.
3730          */
3731         for (i = 0; i < config->rx_ring_num; i++) {
3732                 if (!sp->lro) {
3733                         int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
3734                         int level = rx_buffer_level(sp, rxb_size, i);
3735
3736                         if ((level == PANIC) && (!TASKLET_IN_USE)) {
3737                                 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", 
3738                                                         dev->name);
3739                                 DBG_PRINT(INTR_DBG, "PANIC levels\n");
3740                                 if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
3741                                         DBG_PRINT(ERR_DBG, "%s:Out of memory",
3742                                                   dev->name);
3743                                         DBG_PRINT(ERR_DBG, " in ISR!!\n");
3744                                         clear_bit(0, (&sp->tasklet_status));
3745                                         atomic_dec(&sp->isr_cnt);
3746                                         return IRQ_HANDLED;
3747                                 }
3748                                 clear_bit(0, (&sp->tasklet_status));
3749                         } else if (level == LOW) {
                                     /* Not critical yet: defer refill to the
                                      * tasklet instead of doing it in IRQ. */
3750                                 tasklet_schedule(&sp->task);
3751                         }
3752                 }
                     /* LRO path: always refill directly from the ISR. */
3753                 else if (fill_rx_buffers(sp, i) == -ENOMEM) {
3754                                 DBG_PRINT(ERR_DBG, "%s:Out of memory",
3755                                                         dev->name);
3756                                 DBG_PRINT(ERR_DBG, " in Rx Intr!!\n");
3757                                 break;
3758                 }
3759         }
3760
3761         atomic_dec(&sp->isr_cnt);
3762         return IRQ_HANDLED;
3763 }
3764
3765 static irqreturn_t
     /* MSI-X per-ring Rx interrupt handler: services one Rx ring and
      * replenishes its buffers (inline on PANIC level, via tasklet on
      * LOW level, always inline in LRO mode). */
3766 s2io_msix_ring_handle(int irq, void *dev_id, struct pt_regs *regs)
3767 {
3768         ring_info_t *ring = (ring_info_t *)dev_id;
3769         nic_t *sp = ring->nic;
             /*
              * dev_id is the ring_info_t registered with this vector, NOT a
              * net_device; the old cast of dev_id made dev->name read
              * garbage.  Fetch the netdev through the NIC structure.
              */
             struct net_device *dev = sp->dev;
3771         int rxb_size, level, rng_n;
3772
             /* Track in-flight ISRs; every return path must decrement. */
3773         atomic_inc(&sp->isr_cnt);
3774         rx_intr_handler(ring);
3775
3776         rng_n = ring->ring_no;
3777         if (!sp->lro) {
3778                 rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
3779                 level = rx_buffer_level(sp, rxb_size, rng_n);
3780
3781                 if ((level == PANIC) && (!TASKLET_IN_USE)) {
3782                         int ret;
3783                         DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
3784                         DBG_PRINT(INTR_DBG, "PANIC levels\n");
3785                         if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
3786                                 DBG_PRINT(ERR_DBG, "Out of memory in %s",
3787                                           __FUNCTION__);
3788                                 clear_bit(0, (&sp->tasklet_status));
                                     /* Balance the isr_cnt increment on this
                                      * early return (was leaked before; cf.
                                      * s2io_msi_handle). */
                                     atomic_dec(&sp->isr_cnt);
3789                                 return IRQ_HANDLED;
3790                         }
3791                         clear_bit(0, (&sp->tasklet_status));
3792                 } else if (level == LOW) {
3793                         tasklet_schedule(&sp->task);
3794                 }
3795         }
             /* LRO path: always refill directly from the ISR. */
3796         else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
3797                         DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
3798                         DBG_PRINT(ERR_DBG, " in Rx Intr!!\n");
3799         }
3800
3801         atomic_dec(&sp->isr_cnt);
3802
3803         return IRQ_HANDLED;
3804 }
3805
3806 static irqreturn_t
     /* MSI-X per-FIFO Tx interrupt handler: reaps Tx completions for
      * the one FIFO registered with this vector. */
3807 s2io_msix_fifo_handle(int irq, void *dev_id, struct pt_regs *regs)
3808 {
3809         fifo_info_t *fifo = (fifo_info_t *)dev_id;
3810         nic_t *sp = fifo->nic;
3811
             /* isr_cnt brackets the handler body (in-flight ISR tracking). */
3812         atomic_inc(&sp->isr_cnt);
3813         tx_intr_handler(fifo);
3814         atomic_dec(&sp->isr_cnt);
3815         return IRQ_HANDLED;
3816 }
3817
     /* Handles TxPIC/GPIO interrupts: detects link up/down transitions,
      * kicks s2io_set_link() on a real state change, and re-programs the
      * GPIO interrupt mask so only the interrupt for the *opposite* link
      * state stays unmasked. */
3818 static void s2io_txpic_intr_handle(nic_t *sp)
3819 {
3820         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3821         u64 val64;
3822
3823         val64 = readq(&bar0->pic_int_status);
3824         if (val64 & PIC_INT_GPIO) {
3825                 val64 = readq(&bar0->gpio_int_reg);
                     /* Both up and down latched at once: a glitch — ack
                      * both bits (write-1-to-clear) and just re-mask. */
3826                 if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
3827                     (val64 & GPIO_INT_REG_LINK_UP)) {
3828                         val64 |=  GPIO_INT_REG_LINK_DOWN;
3829                         val64 |= GPIO_INT_REG_LINK_UP;
3830                         writeq(val64, &bar0->gpio_int_reg);
3831                         goto masking;
3832                 }
3833
                     /* Genuine transition relative to the last known state:
                      * mask both link interrupts while updating the link. */
3834                 if (((sp->last_link_state == LINK_UP) &&
3835                         (val64 & GPIO_INT_REG_LINK_DOWN)) ||
3836                 ((sp->last_link_state == LINK_DOWN) &&
3837                 (val64 & GPIO_INT_REG_LINK_UP))) {
3838                         val64 = readq(&bar0->gpio_int_mask);
3839                         val64 |=  GPIO_INT_MASK_LINK_DOWN;
3840                         val64 |= GPIO_INT_MASK_LINK_UP;
3841                         writeq(val64, &bar0->gpio_int_mask);
3842                         s2io_set_link((unsigned long)sp);
3843                 }
3844 masking:
3845                 if (sp->last_link_state == LINK_UP) {
3846                         /*enable down interrupt */
3847                         val64 = readq(&bar0->gpio_int_mask);
3848                         /* unmasks link down intr */
3849                         val64 &=  ~GPIO_INT_MASK_LINK_DOWN;
3850                         /* masks link up intr */
3851                         val64 |= GPIO_INT_MASK_LINK_UP;
3852                         writeq(val64, &bar0->gpio_int_mask);
3853                 } else {
3854                         /*enable UP Interrupt */
3855                         val64 = readq(&bar0->gpio_int_mask);
3856                         /* unmasks link up interrupt */
3857                         val64 &= ~GPIO_INT_MASK_LINK_UP;
3858                         /* masks link down interrupt */
3859                         val64 |=  GPIO_INT_MASK_LINK_DOWN;
3860                         writeq(val64, &bar0->gpio_int_mask);
3861                 }
3862         }
3863 }
3864
3865 /**
3866  *  s2io_isr - ISR handler of the device .
3867  *  @irq: the irq of the device.
3868  *  @dev_id: a void pointer to the dev structure of the NIC.
3869  *  @pt_regs: pointer to the registers pushed on the stack.
3870  *  Description:  This function is the ISR handler of the device. It
3871  *  identifies the reason for the interrupt and calls the relevant
3872  *  service routines. As a contingency measure, this ISR allocates the
3873  *  recv buffers, if their numbers are below the panic value which is
3874  *  presently set to 25% of the original number of rcv buffers allocated.
3875  *  Return value:
3876  *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
3877  *   IRQ_NONE: will be returned if interrupt is not from our device
3878  */
3879 static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
3880 {
3881         struct net_device *dev = (struct net_device *) dev_id;
3882         nic_t *sp = dev->priv;
3883         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3884         int i;
3885         u64 reason = 0, val64;
3886         mac_info_t *mac_control;
3887         struct config_param *config;
3888
             /* Track in-flight ISRs; every return path must decrement. */
3889         atomic_inc(&sp->isr_cnt);
3890         mac_control = &sp->mac_control;
3891         config = &sp->config;
3892
3893         /*
3894          * Identify the cause for interrupt and call the appropriate
3895          * interrupt handler. Causes for the interrupt could be;
3896          * 1. Rx of packet.
3897          * 2. Tx complete.
3898          * 3. Link down.
3899          * 4. Error in any functional blocks of the NIC.
3900          */
3901         reason = readq(&bar0->general_int_status);
3902
3903         if (!reason) {
3904                 /* The interrupt was not raised by Xena. */
3905                 atomic_dec(&sp->isr_cnt);
3906                 return IRQ_NONE;
3907         }
3908
3909         val64 = 0xFFFFFFFFFFFFFFFFULL;
3910 #ifdef CONFIG_S2IO_NAPI
             /* NAPI: mask further Rx interrupts and let the poll routine
              * do the Rx work. */
3911         if (reason & GEN_INTR_RXTRAFFIC) {
3912                 if (netif_rx_schedule_prep(dev)) {
3913                         writeq(val64, &bar0->rx_traffic_mask);
3914                         __netif_rx_schedule(dev);
3915                 }
3916         }
3917 #else
3918         /*
3919          * Rx handler is called by default, without checking for the
3920          * cause of interrupt.
3921          * rx_traffic_int reg is an R1 register, writing all 1's
3922          * will ensure that the actual interrupt causing bit get's
3923          * cleared and hence a read can be avoided.
3924          */
3925         writeq(val64, &bar0->rx_traffic_int);
3926         for (i = 0; i < config->rx_ring_num; i++) {
3927                 rx_intr_handler(&mac_control->rings[i]);
3928         }
3929 #endif
3930
3931         /*
3932          * tx_traffic_int reg is an R1 register, writing all 1's
3933          * will ensure that the actual interrupt causing bit get's
3934          * cleared and hence a read can be avoided.
3935          */
3936         writeq(val64, &bar0->tx_traffic_int);
3937
3938         for (i = 0; i < config->tx_fifo_num; i++)
3939                 tx_intr_handler(&mac_control->fifos[i]);
3940
3941         if (reason & GEN_INTR_TXPIC)
3942                 s2io_txpic_intr_handle(sp);
3943         /*
3944          * If the Rx buffer count is below the panic threshold then
3945          * reallocate the buffers from the interrupt handler itself,
3946          * else schedule a tasklet to reallocate the buffers.
3947          */
3948 #ifndef CONFIG_S2IO_NAPI
3949         for (i = 0; i < config->rx_ring_num; i++) {
3950                 if (!sp->lro) {
3951                         int ret;
3952                         int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
3953                         int level = rx_buffer_level(sp, rxb_size, i);
3954
3955                         if ((level == PANIC) && (!TASKLET_IN_USE)) {
3956                                 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", 
3957                                                         dev->name);
3958                                 DBG_PRINT(INTR_DBG, "PANIC levels\n");
3959                                 if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
3960                                         DBG_PRINT(ERR_DBG, "%s:Out of memory",
3961                                                   dev->name);
3962                                         DBG_PRINT(ERR_DBG, " in ISR!!\n");
3963                                         clear_bit(0, (&sp->tasklet_status));
3964                                         atomic_dec(&sp->isr_cnt);
3965                                         return IRQ_HANDLED;
3966                                 }
3967                                 clear_bit(0, (&sp->tasklet_status));
3968                         } else if (level == LOW) {
3969                                 tasklet_schedule(&sp->task);
3970                         }
3971                 }
                     /* LRO path: always refill directly from the ISR. */
3972                 else if (fill_rx_buffers(sp, i) == -ENOMEM) {
3973                                 DBG_PRINT(ERR_DBG, "%s:Out of memory",
3974                                                         dev->name);
3975                                 DBG_PRINT(ERR_DBG, " in Rx intr!!\n");
3976                                 break;
3977                 }
3978         }
3979 #endif
3980
3981         atomic_dec(&sp->isr_cnt);
3982         return IRQ_HANDLED;
3983 }
3984
3985 /**
3986  * s2io_updt_stats -
3987  */
     /* Triggers a one-shot hardware statistics DMA into the stats block
      * and polls (up to ~500us) for its completion.  Gives up silently
      * if the update does not complete or the card is down. */
3988 static void s2io_updt_stats(nic_t *sp)
3989 {
3990         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3991         u64 val64;
3992         int cnt = 0;
3993
3994         if (atomic_read(&sp->card_state) == CARD_UP) {
3995                 /* Apprx 30us on a 133 MHz bus */
3996                 val64 = SET_UPDT_CLICKS(10) |
3997                         STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
3998                 writeq(val64, &bar0->stat_cfg);
3999                 do {
4000                         udelay(100);
4001                         val64 = readq(&bar0->stat_cfg);
                             /* Bit 0 clears when the one-shot update is done. */
4002                         if (!(val64 & BIT(0)))
4003                                 break;
4004                         cnt++;
4005                         if (cnt == 5)
4006                                 break; /* Updt failed */
4007                 } while(1);
4008         }
4009 }
4010
4011 /**
4012  *  s2io_get_stats - Updates the device statistics structure.
4013  *  @dev : pointer to the device structure.
4014  *  Description:
4015  *  This function updates the device statistics structure in the s2io_nic
4016  *  structure and returns a pointer to the same.
4017  *  Return value:
4018  *  pointer to the updated net_device_stats structure.
4019  */
4020
4021 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4022 {
4023         nic_t *sp = dev->priv;
4024         mac_info_t *mac_control;
4025         struct config_param *config;
4026
4027
4028         mac_control = &sp->mac_control;
4029         config = &sp->config;
4030
4031         /* Configure Stats for immediate updt */
4032         s2io_updt_stats(sp);
4033
             /* Copy selected hardware counters into the netdev stats.
              * NOTE(review): only 32 bits of each counter are read here
              * (le32_to_cpu) — verify against the stats block layout
              * whether an upper-32-bit half is being dropped. */
4034         sp->stats.tx_packets =
4035                 le32_to_cpu(mac_control->stats_info->tmac_frms);
4036         sp->stats.tx_errors =
4037                 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4038         sp->stats.rx_errors =
4039                 le32_to_cpu(mac_control->stats_info->rmac_drop_frms);
4040         sp->stats.multicast =
4041                 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
4042         sp->stats.rx_length_errors =
4043                 le32_to_cpu(mac_control->stats_info->rmac_long_frms);
4044
4045         return (&sp->stats);
4046 }
4047
4048 /**
4049  *  s2io_set_multicast - entry point for multicast address enable/disable.
4050  *  @dev : pointer to the device structure
4051  *  Description:
4052  *  This function is a driver entry point which gets called by the kernel
4053  *  whenever multicast addresses must be enabled/disabled. This also gets
4054  *  called to set/reset promiscuous mode. Depending on the device flag, we
4055  *  determine, if multicast address must be enabled or if promiscuous mode
4056  *  is to be disabled etc.
4057  *  Return value:
4058  *  void.
4059  */
4060
4061 static void s2io_set_multicast(struct net_device *dev)
4062 {
4063         int i, j, prev_cnt;
4064         struct dev_mc_list *mclist;
4065         nic_t *sp = dev->priv;
4066         XENA_dev_config_t __iomem *bar0 = sp->bar0;
             /* multi_mac/mask: the CAM entry that matches all multicast
              * addresses; dis_addr: all-ones pattern used to disable an
              * entry. */
4067         u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4068             0xfeffffffffffULL;
4069         u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
4070         void __iomem *add;
4071
4072         if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4073                 /*  Enable all Multicast addresses */
4074                 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4075                        &bar0->rmac_addr_data0_mem);
4076                 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4077                        &bar0->rmac_addr_data1_mem);
4078                 val64 = RMAC_ADDR_CMD_MEM_WE |
4079                     RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4080                     RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
4081                 writeq(val64, &bar0->rmac_addr_cmd_mem);
4082                 /* Wait till command completes */
4083                 wait_for_cmd_complete(sp);
4084
4085                 sp->m_cast_flg = 1;
4086                 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
4087         } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4088                 /*  Disable all Multicast addresses */
4089                 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4090                        &bar0->rmac_addr_data0_mem);
4091                 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4092                        &bar0->rmac_addr_data1_mem);
4093                 val64 = RMAC_ADDR_CMD_MEM_WE |
4094                     RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4095                     RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4096                 writeq(val64, &bar0->rmac_addr_cmd_mem);
4097                 /* Wait till command completes */
4098                 wait_for_cmd_complete(sp);
4099
4100                 sp->m_cast_flg = 0;
4101                 sp->all_multi_pos = 0;
4102         }
4103
4104         if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4105                 /*  Put the NIC into promiscuous mode */
4106                 add = &bar0->mac_cfg;
4107                 val64 = readq(&bar0->mac_cfg);
4108                 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
4109
                     /* mac_cfg is key-protected: each 32-bit half must be
                      * unlocked with the RMAC_CFG_KEY before the write. */
4110                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4111                 writel((u32) val64, add);
4112                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4113                 writel((u32) (val64 >> 32), (add + 4));
4114
4115                 val64 = readq(&bar0->mac_cfg);
4116                 sp->promisc_flg = 1;
4117                 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
4118                           dev->name);
4119         } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
4120                 /*  Remove the NIC from promiscuous mode */
4121                 add = &bar0->mac_cfg;
4122                 val64 = readq(&bar0->mac_cfg);
4123                 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
4124
4125                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4126                 writel((u32) val64, add);
4127                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4128                 writel((u32) (val64 >> 32), (add + 4));
4129
4130                 val64 = readq(&bar0->mac_cfg);
4131                 sp->promisc_flg = 0;
4132                 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
4133                           dev->name);
4134         }
4135
4136         /*  Update individual M_CAST address list */
4137         if ((!sp->m_cast_flg) && dev->mc_count) {
4138                 if (dev->mc_count >
4139                     (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
4140                         DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
4141                                   dev->name);
4142                         DBG_PRINT(ERR_DBG, "can be added, please enable ");
4143                         DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
4144                         return;
4145                 }
4146
4147                 prev_cnt = sp->mc_addr_count;
4148                 sp->mc_addr_count = dev->mc_count;
4149
4150                 /* Clear out the previous list of Mc in the H/W. */
4151                 for (i = 0; i < prev_cnt; i++) {
4152                         writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4153                                &bar0->rmac_addr_data0_mem);
4154                         writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4155                                 &bar0->rmac_addr_data1_mem);
4156                         val64 = RMAC_ADDR_CMD_MEM_WE |
4157                             RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4158                             RMAC_ADDR_CMD_MEM_OFFSET
4159                             (MAC_MC_ADDR_START_OFFSET + i);
4160                         writeq(val64, &bar0->rmac_addr_cmd_mem);
4161
4162                         /* Wait for command completes */
4163                         if (wait_for_cmd_complete(sp)) {
4164                                 DBG_PRINT(ERR_DBG, "%s: Adding ",
4165                                           dev->name);
4166                                 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4167                                 return;
4168                         }
4169                 }
4170
4171                 /* Create the new Rx filter list and update the same in H/W. */
4172                 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
4173                      i++, mclist = mclist->next) {
4174                         memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
4175                                ETH_ALEN);
                             /* Pack the 6-byte address into a u64, first
                              * octet in the most significant position. */
4176                         mac_addr = 0;
4177                         for (j = 0; j < ETH_ALEN; j++) {
4178                                 mac_addr |= mclist->dmi_addr[j];
4179                                 mac_addr <<= 8;
4180                         }
4181                         mac_addr >>= 8;
4182                         writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4183                                &bar0->rmac_addr_data0_mem);
4184                         writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4185                                 &bar0->rmac_addr_data1_mem);
4186                         val64 = RMAC_ADDR_CMD_MEM_WE |
4187                             RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4188                             RMAC_ADDR_CMD_MEM_OFFSET
4189                             (i + MAC_MC_ADDR_START_OFFSET);
4190                         writeq(val64, &bar0->rmac_addr_cmd_mem);
4191
4192                         /* Wait for command completes */
4193                         if (wait_for_cmd_complete(sp)) {
4194                                 DBG_PRINT(ERR_DBG, "%s: Adding ",
4195                                           dev->name);
4196                                 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4197                                 return;
4198                         }
4199                 }
4200         }
4201 }
4202
4203 /**
4204  *  s2io_set_mac_addr - Programs the Xframe mac address
4205  *  @dev : pointer to the device structure.
4206  *  @addr: a uchar pointer to the new mac address which is to be set.
4207  *  Description : This procedure will program the Xframe to receive
4208  *  frames with new Mac Address
4209  *  Return value: SUCCESS on success and an appropriate (-)ve integer
4210  *  as defined in errno.h file on failure.
4211  */
4212
4213 static int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
4214 {
4215         nic_t *sp = dev->priv;
4216         XENA_dev_config_t __iomem *bar0 = sp->bar0;
4217         register u64 val64, mac_addr = 0;
4218         int i;
4219
4220         /*
4221          * Set the new MAC address as the new unicast filter and reflect this
4222          * change on the device address registered with the OS. It will be
4223          * at offset 0.
4224          */
             /* Pack the 6-byte address into a u64, first octet in the most
              * significant position. */
4225         for (i = 0; i < ETH_ALEN; i++) {
4226                 mac_addr <<= 8;
4227                 mac_addr |= addr[i];
4228         }
4229
4230         writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4231                &bar0->rmac_addr_data0_mem);
4232
             /* Issue a write-enable strobe to CAM offset 0 (the unicast
              * entry) and wait for the command to complete. */
4233         val64 =
4234             RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4235             RMAC_ADDR_CMD_MEM_OFFSET(0);
4236         writeq(val64, &bar0->rmac_addr_cmd_mem);
4237         /* Wait till command completes */
4238         if (wait_for_cmd_complete(sp)) {
4239                 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
4240                 return FAILURE;
4241         }
4242
4243         return SUCCESS;
4244 }
4245
4246 /**
4247  * s2io_ethtool_sset - Sets different link parameters.
4248  * @sp : private member of the device structure, which is a pointer to the  * s2io_nic structure.
4249  * @info: pointer to the structure with parameters given by ethtool to set
4250  * link information.
4251  * Description:
4252  * The function sets different link parameters provided by the user onto
4253  * the NIC.
4254  * Return value:
4255  * 0 on success.
4256 */
4257
4258 static int s2io_ethtool_sset(struct net_device *dev,
4259                              struct ethtool_cmd *info)
4260 {
4261         nic_t *sp = dev->priv;
             /* The hardware only supports fixed 10G full duplex with
              * autonegotiation off; reject anything else, otherwise
              * bounce the interface (close/open) to apply the settings. */
4262         if ((info->autoneg == AUTONEG_ENABLE) ||
4263             (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
4264                 return -EINVAL;
4265         else {
4266                 s2io_close(sp->dev);
4267                 s2io_open(sp->dev);
4268         }
4269
4270         return 0;
4271 }
4272
4273 /**
4274  * s2io_ethtool_gset - Return link specific information.
4275  * @sp : private member of the device structure, pointer to the
4276  *      s2io_nic structure.
4277  * @info : pointer to the structure with parameters given by ethtool
4278  * to return link information.
4279  * Description:
4280  * Returns link specific information like speed, duplex etc.. to ethtool.
4281  * Return value :
4282  * return 0 on success.
4283  */
4284
4285 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
4286 {
4287         nic_t *sp = dev->priv;
4288         info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
             /* The advertising mask uses the ADVERTISED_* flag namespace
              * (the bit positions match SUPPORTED_*, so the reported
              * value is unchanged). */
4289         info->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
4290         info->port = PORT_FIBRE;
4291         /* info->transceiver?? TODO */
4292
4293         if (netif_carrier_ok(sp->dev)) {
4294                 info->speed = 10000;
4295                 info->duplex = DUPLEX_FULL;
4296         } else {
                     /* Link down: -1 is the ethtool convention for
                      * "unknown" speed/duplex. */
4297                 info->speed = -1;
4298                 info->duplex = -1;
4299         }
4300
             /* Fixed-configuration PHY: autonegotiation always off. */
4301         info->autoneg = AUTONEG_DISABLE;
4302         return 0;
4303 }
4304
4305 /**
4306  * s2io_ethtool_gdrvinfo - Returns driver specific information.
4307  * @sp : private member of the device structure, which is a pointer to the
4308  * s2io_nic structure.
4309  * @info : pointer to the structure with parameters given by ethtool to
4310  * return driver information.
4311  * Description:
 * Returns driver specific information like name, version etc.. to ethtool.
4313  * Return value:
4314  *  void
4315  */
4316
4317 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
4318                                   struct ethtool_drvinfo *info)
4319 {
4320         nic_t *sp = dev->priv;
4321
4322         strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
4323         strncpy(info->version, s2io_driver_version, sizeof(info->version));
4324         strncpy(info->fw_version, "", sizeof(info->fw_version));
4325         strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
4326         info->regdump_len = XENA_REG_SPACE;
4327         info->eedump_len = XENA_EEPROM_SPACE;
4328         info->testinfo_len = S2IO_TEST_LEN;
4329         info->n_stats = S2IO_STAT_LEN;
4330 }
4331
4332 /**
 *  s2io_ethtool_gregs - dumps the entire space of Xframe into the buffer.
4334  *  @sp: private member of the device structure, which is a pointer to the
4335  *  s2io_nic structure.
4336  *  @regs : pointer to the structure with parameters given by ethtool for
4337  *  dumping the registers.
 *  @reg_space: The input argument into which all the registers are dumped.
4339  *  Description:
4340  *  Dumps the entire register space of xFrame NIC into the user given
4341  *  buffer area.
4342  * Return value :
4343  * void .
4344 */
4345
4346 static void s2io_ethtool_gregs(struct net_device *dev,
4347                                struct ethtool_regs *regs, void *space)
4348 {
4349         int i;
4350         u64 reg;
4351         u8 *reg_space = (u8 *) space;
4352         nic_t *sp = dev->priv;
4353
4354         regs->len = XENA_REG_SPACE;
4355         regs->version = sp->pdev->subsystem_device;
4356
4357         for (i = 0; i < regs->len; i += 8) {
4358                 reg = readq(sp->bar0 + i);
4359                 memcpy((reg_space + i), &reg, 8);
4360         }
4361 }
4362
4363 /**
4364  *  s2io_phy_id  - timer function that alternates adapter LED.
4365  *  @data : address of the private member of the device structure, which
4366  *  is a pointer to the s2io_nic structure, provided as an u32.
4367  * Description: This is actually the timer function that alternates the
4368  * adapter LED bit of the adapter control bit to set/reset every time on
 * invocation. The timer is set for 1/2 a second, hence the NIC blinks
4370  *  once every second.
4371 */
4372 static void s2io_phy_id(unsigned long data)
4373 {
4374         nic_t *sp = (nic_t *) data;
4375         XENA_dev_config_t __iomem *bar0 = sp->bar0;
4376         u64 val64 = 0;
4377         u16 subid;
4378
4379         subid = sp->pdev->subsystem_device;
4380         if ((sp->device_type == XFRAME_II_DEVICE) ||
4381                    ((subid & 0xFF) >= 0x07)) {
4382                 val64 = readq(&bar0->gpio_control);
4383                 val64 ^= GPIO_CTRL_GPIO_0;
4384                 writeq(val64, &bar0->gpio_control);
4385         } else {
4386                 val64 = readq(&bar0->adapter_control);
4387                 val64 ^= ADAPTER_LED_ON;
4388                 writeq(val64, &bar0->adapter_control);
4389         }
4390
4391         mod_timer(&sp->id_timer, jiffies + HZ / 2);
4392 }
4393
4394 /**
4395  * s2io_ethtool_idnic - To physically identify the nic on the system.
4396  * @sp : private member of the device structure, which is a pointer to the
4397  * s2io_nic structure.
4398  * @id : pointer to the structure with identification parameters given by
4399  * ethtool.
4400  * Description: Used to physically identify the NIC on the system.
4401  * The Link LED will blink for a time specified by the user for
4402  * identification.
4403  * NOTE: The Link has to be Up to be able to blink the LED. Hence
 * identification is possible only if its link is up.
4405  * Return value:
4406  * int , returns 0 on success
4407  */
4408
4409 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
4410 {
4411         u64 val64 = 0, last_gpio_ctrl_val;
4412         nic_t *sp = dev->priv;
4413         XENA_dev_config_t __iomem *bar0 = sp->bar0;
4414         u16 subid;
4415
4416         subid = sp->pdev->subsystem_device;
4417         last_gpio_ctrl_val = readq(&bar0->gpio_control);
4418         if ((sp->device_type == XFRAME_I_DEVICE) &&
4419                 ((subid & 0xFF) < 0x07)) {
4420                 val64 = readq(&bar0->adapter_control);
4421                 if (!(val64 & ADAPTER_CNTL_EN)) {
4422                         printk(KERN_ERR
4423                                "Adapter Link down, cannot blink LED\n");
4424                         return -EFAULT;
4425                 }
4426         }
4427         if (sp->id_timer.function == NULL) {
4428                 init_timer(&sp->id_timer);
4429                 sp->id_timer.function = s2io_phy_id;
4430                 sp->id_timer.data = (unsigned long) sp;
4431         }
4432         mod_timer(&sp->id_timer, jiffies);
4433         if (data)
4434                 msleep_interruptible(data * HZ);
4435         else
4436                 msleep_interruptible(MAX_FLICKER_TIME);
4437         del_timer_sync(&sp->id_timer);
4438
4439         if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
4440                 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
4441                 last_gpio_ctrl_val = readq(&bar0->gpio_control);
4442         }
4443
4444         return 0;
4445 }
4446
4447 /**
 * s2io_ethtool_getpause_data - Pause frame generation and reception.
4449  * @sp : private member of the device structure, which is a pointer to the
4450  *      s2io_nic structure.
4451  * @ep : pointer to the structure with pause parameters given by ethtool.
4452  * Description:
4453  * Returns the Pause frame generation and reception capability of the NIC.
4454  * Return value:
4455  *  void
4456  */
4457 static void s2io_ethtool_getpause_data(struct net_device *dev,
4458                                        struct ethtool_pauseparam *ep)
4459 {
4460         u64 val64;
4461         nic_t *sp = dev->priv;
4462         XENA_dev_config_t __iomem *bar0 = sp->bar0;
4463
4464         val64 = readq(&bar0->rmac_pause_cfg);
4465         if (val64 & RMAC_PAUSE_GEN_ENABLE)
4466                 ep->tx_pause = TRUE;
4467         if (val64 & RMAC_PAUSE_RX_ENABLE)
4468                 ep->rx_pause = TRUE;
4469         ep->autoneg = FALSE;
4470 }
4471
4472 /**
4473  * s2io_ethtool_setpause_data -  set/reset pause frame generation.
4474  * @sp : private member of the device structure, which is a pointer to the
4475  *      s2io_nic structure.
4476  * @ep : pointer to the structure with pause parameters given by ethtool.
4477  * Description:
4478  * It can be used to set or reset Pause frame generation or reception
4479  * support of the NIC.
4480  * Return value:
4481  * int, returns 0 on Success
4482  */
4483
4484 static int s2io_ethtool_setpause_data(struct net_device *dev,
4485                                struct ethtool_pauseparam *ep)
4486 {
4487         u64 val64;
4488         nic_t *sp = dev->priv;
4489         XENA_dev_config_t __iomem *bar0 = sp->bar0;
4490
4491         val64 = readq(&bar0->rmac_pause_cfg);
4492         if (ep->tx_pause)
4493                 val64 |= RMAC_PAUSE_GEN_ENABLE;
4494         else
4495                 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
4496         if (ep->rx_pause)
4497                 val64 |= RMAC_PAUSE_RX_ENABLE;
4498         else
4499                 val64 &= ~RMAC_PAUSE_RX_ENABLE;
4500         writeq(val64, &bar0->rmac_pause_cfg);
4501         return 0;
4502 }
4503
4504 /**
4505  * read_eeprom - reads 4 bytes of data from user given offset.
4506  * @sp : private member of the device structure, which is a pointer to the
4507  *      s2io_nic structure.
 * @off : offset at which the data must be read
4509  * @data : Its an output parameter where the data read at the given
4510  *      offset is stored.
4511  * Description:
4512  * Will read 4 bytes of data from the user given offset and return the
4513  * read data.
4514  * NOTE: Will allow to read only part of the EEPROM visible through the
4515  *   I2C bus.
4516  * Return value:
4517  *  -1 on failure and 0 on success.
4518  */
4519
#define S2IO_DEV_ID		5
/* Read 4 bytes from the EEPROM at 'off' into *data; 0 on success, -1 on
 * timeout, 1 on SPI NACK (Xframe II only).
 */
static int read_eeprom(nic_t * sp, int off, u64 * data)
{
	int ret = -1;
	u32 exit_cnt = 0;
	u64 val64;
	XENA_dev_config_t __iomem *bar0 = sp->bar0;

	if (sp->device_type == XFRAME_I_DEVICE) {
		/* Xframe I: the EEPROM sits behind the I2C controller.
		 * NOTE(review): BYTE_CNT of 0x3 presumably encodes a
		 * 4-byte transfer - confirm against the Xena spec.
		 */
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
		    I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
		    I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		/* Poll for completion: 5 tries, 50 ms apart (~250 ms max). */
		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				*data = I2C_CONTROL_GET_DATA(val64);
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Xframe II: EEPROM is behind the SPI controller.  The
		 * parameters are written first, then REQ is raised in a
		 * second write to kick off the transaction (CMD 0x3 = read).
		 */
		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(0x3) |
			SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		/* Poll for NACK or DONE, same 5 x 50 ms budget as above. */
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				/* Device rejected the request. */
				ret = 1;
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				/* Only the low 3 bytes carry valid data. */
				*data = readq(&bar0->spi_data);
				*data &= 0xffffff;
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
4570
4571 /**
4572  *  write_eeprom - actually writes the relevant part of the data value.
4573  *  @sp : private member of the device structure, which is a pointer to the
4574  *       s2io_nic structure.
4575  *  @off : offset at which the data must be written
4576  *  @data : The data that is to be written
4577  *  @cnt : Number of bytes of the data that are actually to be written into
4578  *  the Eeprom. (max of 3)
4579  * Description:
4580  *  Actually writes the relevant part of the data value into the Eeprom
4581  *  through the I2C bus.
4582  * Return value:
4583  *  0 on success, -1 on failure.
4584  */
4585
/* Write 'cnt' bytes of 'data' to the EEPROM at 'off'; 0 on success,
 * -1 on timeout, 1 on SPI NACK (Xframe II only).
 */
static int write_eeprom(nic_t * sp, int off, u64 data, int cnt)
{
	int exit_cnt = 0, ret = -1;
	u64 val64;
	XENA_dev_config_t __iomem *bar0 = sp->bar0;

	if (sp->device_type == XFRAME_I_DEVICE) {
		/* Xframe I: program the write through the I2C controller. */
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
		    I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
		    I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		/* Poll for completion: 5 tries, 50 ms apart. A NACK at
		 * completion means the write was rejected.
		 */
		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				if (!(val64 & I2C_CONTROL_NACK))
					ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	if (sp->device_type == XFRAME_II_DEVICE) {
		/* NOTE(review): a cnt of 8 maps to a BYTECNT field of 0 -
		 * presumably the field encodes 8 bytes as zero; confirm
		 * against the Xena SPI spec.
		 */
		int write_cnt = (cnt == 8) ? 0 : cnt;
		writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);

		/* Latch the parameters, then raise REQ in a second write
		 * to start the transaction (CMD 0x2 = write).
		 */
		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(write_cnt) |
			SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				/* Device rejected the request. */
				ret = 1;
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
4635
4636 static void s2io_vpd_read(nic_t *nic)
4637 {
4638         u8 vpd_data[256],data;
4639         int i=0, cnt, fail = 0;
4640         int vpd_addr = 0x80;
4641
4642         if (nic->device_type == XFRAME_II_DEVICE) {
4643                 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
4644                 vpd_addr = 0x80;
4645         }
4646         else {
4647                 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
4648                 vpd_addr = 0x50;
4649         }
4650
4651         for (i = 0; i < 256; i +=4 ) {
4652                 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
4653                 pci_read_config_byte(nic->pdev,  (vpd_addr + 2), &data);
4654                 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
4655                 for (cnt = 0; cnt <5; cnt++) {
4656                         msleep(2);
4657                         pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
4658                         if (data == 0x80)
4659                                 break;
4660                 }
4661                 if (cnt >= 5) {
4662                         DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
4663                         fail = 1;
4664                         break;
4665                 }
4666                 pci_read_config_dword(nic->pdev,  (vpd_addr + 4),
4667                                       (u32 *)&vpd_data[i]);
4668         }
4669         if ((!fail) && (vpd_data[1] < VPD_PRODUCT_NAME_LEN)) {
4670                 memset(nic->product_name, 0, vpd_data[1]);
4671                 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
4672         }
4673 }
4674
4675 /**
4676  *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
4677  *  @sp : private member of the device structure, which is a pointer to the *       s2io_nic structure.
4678  *  @eeprom : pointer to the user level structure provided by ethtool,
4679  *  containing all relevant information.
4680  *  @data_buf : user defined value to be written into Eeprom.
4681  *  Description: Reads the values stored in the Eeprom at given offset
4682  *  for a given length. Stores these values int the input argument data
4683  *  buffer 'data_buf' and returns these to the caller (ethtool.)
4684  *  Return value:
4685  *  int  0 on success
4686  */
4687
4688 static int s2io_ethtool_geeprom(struct net_device *dev,
4689                          struct ethtool_eeprom *eeprom, u8 * data_buf)
4690 {
4691         u32 i, valid;
4692         u64 data;
4693         nic_t *sp = dev->priv;
4694
4695         eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
4696
4697         if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
4698                 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
4699
4700         for (i = 0; i < eeprom->len; i += 4) {
4701                 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
4702                         DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
4703                         return -EFAULT;
4704                 }
4705                 valid = INV(data);
4706                 memcpy((data_buf + i), &valid, 4);
4707         }
4708         return 0;
4709 }
4710
4711 /**
4712  *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
4713  *  @sp : private member of the device structure, which is a pointer to the
4714  *  s2io_nic structure.
4715  *  @eeprom : pointer to the user level structure provided by ethtool,
4716  *  containing all relevant information.
 *  @data_buf : user defined value to be written into Eeprom.
4718  *  Description:
4719  *  Tries to write the user provided value in the Eeprom, at the offset
4720  *  given by the user.
4721  *  Return value:
4722  *  0 on success, -EFAULT on failure.
4723  */
4724
static int s2io_ethtool_seeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom,
				u8 * data_buf)
{
	int len = eeprom->len, cnt = 0;
	u64 valid = 0, data;
	nic_t *sp = dev->priv;

	/* Refuse the write unless the caller echoes back the magic
	 * (vendor | device << 16) that geeprom reported - a guard
	 * against accidental EEPROM corruption.
	 */
	if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
		DBG_PRINT(ERR_DBG,
			  "ETHTOOL_WRITE_EEPROM Err: Magic value ");
		DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
			  eeprom->magic);
		return -EFAULT;
	}

	/* Write the buffer out one byte per iteration. */
	while (len) {
		data = (u32) data_buf[cnt] & 0x000000FF;
		if (data) {
			/* NOTE(review): non-zero bytes are shifted into the
			 * top byte of the word before being handed to
			 * write_eeprom() with cnt==0 - presumably matching
			 * the controller's data-lane layout; confirm against
			 * the Xena EEPROM programming documentation.
			 */
			valid = (u32) (data << 24);
		} else
			valid = data;

		if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
			DBG_PRINT(ERR_DBG,
				  "ETHTOOL_WRITE_EEPROM Err: Cannot ");
			DBG_PRINT(ERR_DBG,
				  "write into the specified offset\n");
			return -EFAULT;
		}
		cnt++;
		len--;
	}

	return 0;
}
4761
4762 /**
4763  * s2io_register_test - reads and writes into all clock domains.
4764  * @sp : private member of the device structure, which is a pointer to the
4765  * s2io_nic structure.
 * @data : variable that returns the result of each of the test conducted
 * by the driver.
4768  * Description:
4769  * Read and write into all clock domains. The NIC has 3 clock domains,
4770  * see that registers in all the three regions are accessible.
4771  * Return value:
4772  * 0 on success.
4773  */
4774
4775 static int s2io_register_test(nic_t * sp, uint64_t * data)
4776 {
4777         XENA_dev_config_t __iomem *bar0 = sp->bar0;
4778         u64 val64 = 0, exp_val;
4779         int fail = 0;
4780
4781         val64 = readq(&bar0->pif_rd_swapper_fb);
4782         if (val64 != 0x123456789abcdefULL) {
4783                 fail = 1;
4784                 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
4785         }
4786
4787         val64 = readq(&bar0->rmac_pause_cfg);
4788         if (val64 != 0xc000ffff00000000ULL) {
4789                 fail = 1;
4790                 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
4791         }
4792
4793         val64 = readq(&bar0->rx_queue_cfg);
4794         if (sp->device_type == XFRAME_II_DEVICE)
4795                 exp_val = 0x0404040404040404ULL;
4796         else
4797                 exp_val = 0x0808080808080808ULL;
4798         if (val64 != exp_val) {
4799                 fail = 1;
4800                 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
4801         }
4802
4803         val64 = readq(&bar0->xgxs_efifo_cfg);
4804         if (val64 != 0x000000001923141EULL) {
4805                 fail = 1;
4806                 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
4807         }
4808
4809         val64 = 0x5A5A5A5A5A5A5A5AULL;
4810         writeq(val64, &bar0->xmsi_data);
4811         val64 = readq(&bar0->xmsi_data);
4812         if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
4813                 fail = 1;
4814                 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
4815         }
4816
4817         val64 = 0xA5A5A5A5A5A5A5A5ULL;
4818         writeq(val64, &bar0->xmsi_data);
4819         val64 = readq(&bar0->xmsi_data);
4820         if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
4821                 fail = 1;
4822                 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
4823         }
4824
4825         *data = fail;
4826         return fail;
4827 }
4828
4829 /**
4830  * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
4831  * @sp : private member of the device structure, which is a pointer to the
4832  * s2io_nic structure.
4833  * @data:variable that returns the result of each of the test conducted by
4834  * the driver.
4835  * Description:
4836  * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
4837  * register.
4838  * Return value:
4839  * 0 on success.
4840  */
4841
static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
{
	int fail = 0;
	u64 ret_data, org_4F0, org_7F0;
	u8 saved_4F0 = 0, saved_7F0 = 0;
	struct net_device *dev = sp->dev;

	/* Test Write Error at offset 0 */
	/* Note that SPI interface allows write access to all areas
	 * of EEPROM. Hence doing all negative testing only for Xframe I.
	 * For the negative tests, a SUCCESSFUL write (ret 0) is a failure,
	 * because those offsets are expected to be write-protected.
	 */
	if (sp->device_type == XFRAME_I_DEVICE)
		if (!write_eeprom(sp, 0, 0, 3))
			fail = 1;

	/* Save current values at offsets 0x4F0 and 0x7F0 so they can be
	 * restored at the end of the test.
	 */
	if (!read_eeprom(sp, 0x4F0, &org_4F0))
		saved_4F0 = 1;
	if (!read_eeprom(sp, 0x7F0, &org_7F0))
		saved_7F0 = 1;

	/* Test Write at offset 4f0: write a pattern and read it back. */
	if (write_eeprom(sp, 0x4F0, 0x012345, 3))
		fail = 1;
	if (read_eeprom(sp, 0x4F0, &ret_data))
		fail = 1;

	if (ret_data != 0x012345) {
		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
			"Data written %llx Data read %llx\n",
			dev->name, (unsigned long long)0x12345,
			(unsigned long long)ret_data);
		fail = 1;
	}

	/* Reset the EEPROM data go FFFF (erased state). */
	write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);

	/* Test Write Request Error at offset 0x7c (Xframe I negative test:
	 * a successful write here counts as a failure).
	 */
	if (sp->device_type == XFRAME_I_DEVICE)
		if (!write_eeprom(sp, 0x07C, 0, 3))
			fail = 1;

	/* Test Write Request at offset 0x7f0: write/read-back check. */
	if (write_eeprom(sp, 0x7F0, 0x012345, 3))
		fail = 1;
	if (read_eeprom(sp, 0x7F0, &ret_data))
		fail = 1;

	if (ret_data != 0x012345) {
		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
			"Data written %llx Data read %llx\n",
			dev->name, (unsigned long long)0x12345,
			(unsigned long long)ret_data);
		fail = 1;
	}

	/* Reset the EEPROM data go FFFF (erased state). */
	write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);

	if (sp->device_type == XFRAME_I_DEVICE) {
		/* Remaining negative tests: these offsets must all reject
		 * writes on Xframe I; a successful write is a failure.
		 */
		/* Test Write Error at offset 0x80 */
		if (!write_eeprom(sp, 0x080, 0, 3))
			fail = 1;

		/* Test Write Error at offset 0xfc */
		if (!write_eeprom(sp, 0x0FC, 0, 3))
			fail = 1;

		/* Test Write Error at offset 0x100 */
		if (!write_eeprom(sp, 0x100, 0, 3))
			fail = 1;

		/* Test Write Error at offset 4ec */
		if (!write_eeprom(sp, 0x4EC, 0, 3))
			fail = 1;
	}

	/* Restore values at offsets 0x4F0 and 0x7F0 */
	if (saved_4F0)
		write_eeprom(sp, 0x4F0, org_4F0, 3);
	if (saved_7F0)
		write_eeprom(sp, 0x7F0, org_7F0, 3);

	*data = fail;
	return fail;
}
4929
4930 /**
4931  * s2io_bist_test - invokes the MemBist test of the card .
4932  * @sp : private member of the device structure, which is a pointer to the
4933  * s2io_nic structure.
4934  * @data:variable that returns the result of each of the test conducted by
4935  * the driver.
4936  * Description:
4937  * This invokes the MemBist test of the card. We give around
4938  * 2 secs time for the Test to complete. If it's still not complete
 * within this period, we consider that the test failed.
4940  * Return value:
4941  * 0 on success and -1 on failure.
4942  */
4943
4944 static int s2io_bist_test(nic_t * sp, uint64_t * data)
4945 {
4946         u8 bist = 0;
4947         int cnt = 0, ret = -1;
4948
4949         pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
4950         bist |= PCI_BIST_START;
4951         pci_write_config_word(sp->pdev, PCI_BIST, bist);
4952
4953         while (cnt < 20) {
4954                 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
4955                 if (!(bist & PCI_BIST_START)) {
4956                         *data = (bist & PCI_BIST_CODE_MASK);
4957                         ret = 0;
4958                         break;
4959                 }
4960                 msleep(100);
4961                 cnt++;
4962         }
4963
4964         return ret;
4965 }
4966
4967 /**
 * s2io_link_test - verifies the link state of the nic
 * @sp : private member of the device structure, which is a pointer to the
4970  * s2io_nic structure.
4971  * @data: variable that returns the result of each of the test conducted by
4972  * the driver.
4973  * Description:
4974  * The function verifies the link state of the NIC and updates the input
4975  * argument 'data' appropriately.
4976  * Return value:
4977  * 0 on success.
4978  */
4979
4980 static int s2io_link_test(nic_t * sp, uint64_t * data)
4981 {
4982         XENA_dev_config_t __iomem *bar0 = sp->bar0;
4983         u64 val64;
4984
4985         val64 = readq(&bar0->adapter_status);
4986         if (val64 & ADAPTER_STATUS_RMAC_LOCAL_FAULT)
4987                 *data = 1;
4988
4989         return 0;
4990 }
4991
4992 /**
4993  * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @data : variable that returns the result of each of the test
4997  * conducted by the driver.
4998  * Description:
4999  *  This is one of the offline test that tests the read and write
5000  *  access to the RldRam chip on the NIC.
5001  * Return value:
5002  *  0 on success.
5003  */
5004
static int s2io_rldram_test(nic_t * sp, uint64_t * data)
{
	XENA_dev_config_t __iomem *bar0 = sp->bar0;
	u64 val64;
	int cnt, iteration = 0, test_fail = 0;

	/* Disable ECC while exercising the RLDRAM so injected patterns
	 * are not corrected behind our back.
	 */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/* Put the memory controller into its built-in test mode. */
	val64 = readq(&bar0->mc_rldram_test_ctrl);
	val64 |= MC_RLDRAM_TEST_MODE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	val64 |= MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	/* Two passes: the second inverts the data patterns (XOR with
	 * all-ones in the upper 48 bits) to toggle every tested cell.
	 */
	while (iteration < 2) {
		val64 = 0x55555555aaaa0000ULL;
		if (iteration == 1) {
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		}
		writeq(val64, &bar0->mc_rldram_test_d0);

		val64 = 0xaaaa5a5555550000ULL;
		if (iteration == 1) {
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		}
		writeq(val64, &bar0->mc_rldram_test_d1);

		val64 = 0x55aaaaaaaa5a0000ULL;
		if (iteration == 1) {
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		}
		writeq(val64, &bar0->mc_rldram_test_d2);

		/* Target address for the test transactions. */
		val64 = (u64) (0x0000003ffffe0100ULL);
		writeq(val64, &bar0->mc_rldram_test_add);

		/* Kick off the write phase and poll (5 x 200 ms) for DONE. */
		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
			MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(200);
		}

		/* Timed out waiting for the write phase - abort. */
		if (cnt == 5)
			break;

		/* Kick off the read/compare phase and poll (5 x 500 ms). */
		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(500);
		}

		/* Timed out waiting for the read phase - abort. */
		if (cnt == 5)
			break;

		val64 = readq(&bar0->mc_rldram_test_ctrl);
		if (!(val64 & MC_RLDRAM_TEST_PASS))
			test_fail = 1;

		iteration++;
	}

	*data = test_fail;

	/* Bring the adapter out of test mode */
	SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);

	return test_fail;
}
5089
5090 /**
 *  s2io_ethtool_test - conducts 6 tests to determine the health of card.
5092  *  @sp : private member of the device structure, which is a pointer to the
5093  *  s2io_nic structure.
5094  *  @ethtest : pointer to a ethtool command specific structure that will be
5095  *  returned to the user.
5096  *  @data : variable that returns the result of each of the test
5097  * conducted by the driver.
5098  * Description:
5099  *  This function conducts 6 tests ( 4 offline and 2 online) to determine
5100  *  the health of the card.
5101  * Return value:
5102  *  void
5103  */
5104
5105 static void s2io_ethtool_test(struct net_device *dev,
5106                               struct ethtool_test *ethtest,
5107                               uint64_t * data)
5108 {
5109         nic_t *sp = dev->priv;
5110         int orig_state = netif_running(sp->dev);
5111
5112         if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
5113                 /* Offline Tests. */
5114                 if (orig_state)
5115                         s2io_close(sp->dev);
5116
5117                 if (s2io_register_test(sp, &data[0]))
5118                         ethtest->flags |= ETH_TEST_FL_FAILED;
5119
5120                 s2io_reset(sp);
5121
5122                 if (s2io_rldram_test(sp, &data[3]))
5123                         ethtest->flags |= ETH_TEST_FL_FAILED;
5124
5125                 s2io_reset(sp);
5126
5127                 if (s2io_eeprom_test(sp, &data[1]))
5128                         ethtest->flags |= ETH_TEST_FL_FAILED;
5129
5130                 if (s2io_bist_test(sp, &data[4]))
5131                         ethtest->flags |= ETH_TEST_FL_FAILED;
5132
5133                 if (orig_state)
5134                         s2io_open(sp->dev);
5135
5136                 data[2] = 0;
5137         } else {
5138                 /* Online Tests. */
5139                 if (!orig_state) {
5140                         DBG_PRINT(ERR_DBG,
5141                                   "%s: is not up, cannot run test\n",
5142                                   dev->name);
5143                         data[0] = -1;
5144                         data[1] = -1;
5145                         data[2] = -1;
5146                         data[3] = -1;
5147                         data[4] = -1;
5148                 }
5149
5150                 if (s2io_link_test(sp, &data[2]))
5151                         ethtest->flags |= ETH_TEST_FL_FAILED;
5152
5153                 data[0] = 0;
5154                 data[1] = 0;
5155                 data[3] = 0;
5156                 data[4] = 0;
5157         }
5158 }
5159
/**
 *  s2io_get_ethtool_stats - collects device statistics for `ethtool -S`.
 *  @dev: device whose statistics are requested.
 *  @estats: ethtool stats request (unused beyond the callback contract).
 *  @tmp_stats: output array, one u64 slot per counter.
 *
 *  NOTE: the order of the writes below must match the order of the names
 *  in ethtool_stats_keys exactly, or every counter gets mislabelled.
 */
static void s2io_get_ethtool_stats(struct net_device *dev,
                                   struct ethtool_stats *estats,
                                   u64 * tmp_stats)
{
        int i = 0;
        nic_t *sp = dev->priv;
        StatInfo_t *stat_info = sp->mac_control.stats_info;
        u64 tmp;

        /* Refresh the DMA'ed hardware statistics block before copying. */
        s2io_updt_stats(sp);
        /*
         * Several hardware counters are split into a 32-bit base word and
         * a 32-bit overflow word; each pair is combined into one 64-bit
         * value with the overflow word in the upper half.
         */
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32  |
                le32_to_cpu(stat_info->tmac_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_data_octets);
        tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_mcst_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_bcst_frms);
        tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_any_err_frms);
        tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_vld_ip);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_drop_ip);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_icmp);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_rst_tcp);
        tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
        tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_udp);
        /* Receive-side (RMAC) counters start here. */
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_vld_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_data_octets);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_vld_mcst_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_vld_bcst_frms);
        tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_discarded_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_usized_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_osized_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_frag_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_jabber_frms);
        tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_ip);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
        tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
        tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_drop_ip);
        tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_icmp);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
        tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_udp);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_err_drp_udp);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_pause_cnt);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_accepted_ip);
        tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
        /* Placeholder slot kept for key-table alignment. */
        tmp_stats[i++] = 0;
        /* Software statistics maintained by the driver itself. */
        tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
        tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
        tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
        tmp_stats[i++] = stat_info->sw_stat.sending_both;
        tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
        tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
        tmp = 0;
        if (stat_info->sw_stat.num_aggregations) {
                tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
                /* do_div: 64-bit division also works on 32-bit platforms. */
                do_div(tmp, stat_info->sw_stat.num_aggregations);
        }
        /* Average packets aggregated per LRO session (0 if none). */
        tmp_stats[i++] = tmp;
}
5270
5271 static int s2io_ethtool_get_regs_len(struct net_device *dev)
5272 {
5273         return (XENA_REG_SPACE);
5274 }
5275
5276
5277 static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
5278 {
5279         nic_t *sp = dev->priv;
5280
5281         return (sp->rx_csum);
5282 }
5283
5284 static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
5285 {
5286         nic_t *sp = dev->priv;
5287
5288         if (data)
5289                 sp->rx_csum = 1;
5290         else
5291                 sp->rx_csum = 0;
5292
5293         return 0;
5294 }
5295
5296 static int s2io_get_eeprom_len(struct net_device *dev)
5297 {
5298         return (XENA_EEPROM_SPACE);
5299 }
5300
5301 static int s2io_ethtool_self_test_count(struct net_device *dev)
5302 {
5303         return (S2IO_TEST_LEN);
5304 }
5305
5306 static void s2io_ethtool_get_strings(struct net_device *dev,
5307                                      u32 stringset, u8 * data)
5308 {
5309         switch (stringset) {
5310         case ETH_SS_TEST:
5311                 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
5312                 break;
5313         case ETH_SS_STATS:
5314                 memcpy(data, &ethtool_stats_keys,
5315                        sizeof(ethtool_stats_keys));
5316         }
5317 }
5318 static int s2io_ethtool_get_stats_count(struct net_device *dev)
5319 {
5320         return (S2IO_STAT_LEN);
5321 }
5322
5323 static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
5324 {
5325         if (data)
5326                 dev->features |= NETIF_F_IP_CSUM;
5327         else
5328                 dev->features &= ~NETIF_F_IP_CSUM;
5329
5330         return 0;
5331 }
5332
5333
/* ethtool callbacks implemented by this driver. */
static struct ethtool_ops netdev_ethtool_ops = {
        .get_settings = s2io_ethtool_gset,
        .set_settings = s2io_ethtool_sset,
        .get_drvinfo = s2io_ethtool_gdrvinfo,
        .get_regs_len = s2io_ethtool_get_regs_len,
        .get_regs = s2io_ethtool_gregs,
        .get_link = ethtool_op_get_link,
        .get_eeprom_len = s2io_get_eeprom_len,
        .get_eeprom = s2io_ethtool_geeprom,
        .set_eeprom = s2io_ethtool_seeprom,
        .get_pauseparam = s2io_ethtool_getpause_data,
        .set_pauseparam = s2io_ethtool_setpause_data,
        .get_rx_csum = s2io_ethtool_get_rx_csum,
        .set_rx_csum = s2io_ethtool_set_rx_csum,
        .get_tx_csum = ethtool_op_get_tx_csum,
        .set_tx_csum = s2io_ethtool_op_set_tx_csum,
        .get_sg = ethtool_op_get_sg,
        .set_sg = ethtool_op_set_sg,
/* TSO hooks only exist on kernels that define NETIF_F_TSO. */
#ifdef NETIF_F_TSO
        .get_tso = ethtool_op_get_tso,
        .set_tso = ethtool_op_set_tso,
#endif
        .get_ufo = ethtool_op_get_ufo,
        .set_ufo = ethtool_op_set_ufo,
        .self_test_count = s2io_ethtool_self_test_count,
        .self_test = s2io_ethtool_test,
        .get_strings = s2io_ethtool_get_strings,
        .phys_id = s2io_ethtool_idnic,
        .get_stats_count = s2io_ethtool_get_stats_count,
        .get_ethtool_stats = s2io_get_ethtool_stats
};
5365
5366 /**
5367  *  s2io_ioctl - Entry point for the Ioctl
5368  *  @dev :  Device pointer.
5369  *  @ifr :  An IOCTL specific structure, that can contain a pointer to
5370  *  a proprietary structure used to pass information to the driver.
5371  *  @cmd :  This is used to distinguish between the different commands that
5372  *  can be passed to the IOCTL functions.
5373  *  Description:
5374  *  Currently there are no special functionality supported in IOCTL, hence
5375  *  function always returns -EOPNOTSUPP
5376  */
5377
static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
        /* No private ioctls are supported by this driver. */
        return -EOPNOTSUPP;
}
5382
5383 /**
5384  *  s2io_change_mtu - entry point to change MTU size for the device.
5385  *   @dev : device pointer.
5386  *   @new_mtu : the new MTU size for the device.
5387  *   Description: A driver entry point to change MTU size for the device.
5388  *   Before changing the MTU the device must be stopped.
5389  *  Return value:
5390  *   0 on success and an appropriate (-)ve integer as defined in errno.h
5391  *   file on failure.
5392  */
5393
5394 static int s2io_change_mtu(struct net_device *dev, int new_mtu)
5395 {
5396         nic_t *sp = dev->priv;
5397
5398         if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
5399                 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
5400                           dev->name);
5401                 return -EPERM;
5402         }
5403
5404         dev->mtu = new_mtu;
5405         if (netif_running(dev)) {
5406                 s2io_card_down(sp);
5407                 netif_stop_queue(dev);
5408                 if (s2io_card_up(sp)) {
5409                         DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
5410                                   __FUNCTION__);
5411                 }
5412                 if (netif_queue_stopped(dev))
5413                         netif_wake_queue(dev);
5414         } else { /* Device is down */
5415                 XENA_dev_config_t __iomem *bar0 = sp->bar0;
5416                 u64 val64 = new_mtu;
5417
5418                 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
5419         }
5420
5421         return 0;
5422 }
5423
5424 /**
5425  *  s2io_tasklet - Bottom half of the ISR.
5426  *  @dev_addr : address of the device structure in dma_addr_t format.
5427  *  Description:
5428  *  This is the tasklet or the bottom half of the ISR. This is
5429  *  an extension of the ISR which is scheduled by the scheduler to be run
5430  *  when the load on the CPU is low. All low priority tasks of the ISR can
5431  *  be pushed into the tasklet. For now the tasklet is used only to
5432  *  replenish the Rx buffers in the Rx buffer descriptors.
5433  *  Return value:
5434  *  void.
5435  */
5436
static void s2io_tasklet(unsigned long dev_addr)
{
        struct net_device *dev = (struct net_device *) dev_addr;
        nic_t *sp = dev->priv;
        int i, ret;
        mac_info_t *mac_control;
        struct config_param *config;

        mac_control = &sp->mac_control;
        config = &sp->config;

        /*
         * TASKLET_IN_USE presumably test-and-sets bit 0 of
         * sp->tasklet_status so only one instance replenishes the Rx
         * rings at a time -- TODO confirm against the macro definition,
         * which is outside this chunk.
         */
        if (!TASKLET_IN_USE) {
                /* Replenish the Rx buffers consumed on every ring. */
                for (i = 0; i < config->rx_ring_num; i++) {
                        ret = fill_rx_buffers(sp, i);
                        if (ret == -ENOMEM) {
                                /* Allocation failure: give up for now. */
                                DBG_PRINT(ERR_DBG, "%s: Out of ",
                                          dev->name);
                                DBG_PRINT(ERR_DBG, "memory in tasklet\n");
                                break;
                        } else if (ret == -EFILL) {
                                /* Ring already full; nothing more to do. */
                                DBG_PRINT(ERR_DBG,
                                          "%s: Rx Ring %d is full\n",
                                          dev->name, i);
                                break;
                        }
                }
                /* Release the busy bit taken by TASKLET_IN_USE above. */
                clear_bit(0, (&sp->tasklet_status));
        }
}
5466
5467 /**
5468  * s2io_set_link - Set the link status
5469  * @data: long pointer to device private structure
5470  * Description: Sets the link status for the adapter
5471  */
5472
5473 static void s2io_set_link(unsigned long data)
5474 {
5475         nic_t *nic = (nic_t *) data;
5476         struct net_device *dev = nic->dev;
5477         XENA_dev_config_t __iomem *bar0 = nic->bar0;
5478         register u64 val64;
5479         u16 subid;
5480
5481         if (test_and_set_bit(0, &(nic->link_state))) {
5482                 /* The card is being reset, no point doing anything */
5483                 return;
5484         }
5485
5486         subid = nic->pdev->subsystem_device;
5487         if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
5488                 /*
5489                  * Allow a small delay for the NICs self initiated
5490                  * cleanup to complete.
5491                  */
5492                 msleep(100);
5493         }
5494
5495         val64 = readq(&bar0->adapter_status);
5496         if (verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
5497                 if (LINK_IS_UP(val64)) {
5498                         val64 = readq(&bar0->adapter_control);
5499                         val64 |= ADAPTER_CNTL_EN;
5500                         writeq(val64, &bar0->adapter_control);
5501                         if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
5502                                                              subid)) {
5503                                 val64 = readq(&bar0->gpio_control);
5504                                 val64 |= GPIO_CTRL_GPIO_0;
5505                                 writeq(val64, &bar0->gpio_control);
5506                                 val64 = readq(&bar0->gpio_control);
5507                         } else {
5508                                 val64 |= ADAPTER_LED_ON;
5509                                 writeq(val64, &bar0->adapter_control);
5510                         }
5511                         if (s2io_link_fault_indication(nic) ==
5512                                                 MAC_RMAC_ERR_TIMER) {
5513                                 val64 = readq(&bar0->adapter_status);
5514                                 if (!LINK_IS_UP(val64)) {
5515                                         DBG_PRINT(ERR_DBG, "%s:", dev->name);
5516                                         DBG_PRINT(ERR_DBG, " Link down");
5517                                         DBG_PRINT(ERR_DBG, "after ");
5518                                         DBG_PRINT(ERR_DBG, "enabling ");
5519                                         DBG_PRINT(ERR_DBG, "device \n");
5520                                 }
5521                         }
5522                         if (nic->device_enabled_once == FALSE) {
5523