Merge branch 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband
[linux-3.10.git] / drivers / net / s2io.c
1 /************************************************************************
2  * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3  * Copyright(c) 2002-2005 Neterion Inc.
4
5  * This software may be used and distributed according to the terms of
6  * the GNU General Public License (GPL), incorporated herein by reference.
7  * Drivers based on or derived from this code fall under the GPL and must
8  * retain the authorship, copyright and license notice.  This file is not
9  * a complete program and may only be used when the entire operating
10  * system is licensed under the GPL.
11  * See the file COPYING in this distribution for more information.
12  *
13  * Credits:
14  * Jeff Garzik          : For pointing out the improper error condition
15  *                        check in the s2io_xmit routine and also some
16  *                        issues in the Tx watch dog function. Also for
17  *                        patiently answering all those innumerable
18  *                        questions regaring the 2.6 porting issues.
19  * Stephen Hemminger    : Providing proper 2.6 porting mechanism for some
20  *                        macros available only in 2.6 Kernel.
21  * Francois Romieu      : For pointing out all code part that were
22  *                        deprecated and also styling related comments.
23  * Grant Grundler       : For helping me get rid of some Architecture
24  *                        dependent code.
25  * Christopher Hellwig  : Some more 2.6 specific issues in the driver.
26  *
27  * The module loadable parameters that are supported by the driver and a brief
28  * explaination of all the variables.
29  *
30  * rx_ring_num : This can be used to program the number of receive rings used
31  * in the driver.
32  * rx_ring_sz: This defines the number of receive blocks each ring can have.
33  *     This is also an array of size 8.
34  * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35  *              values are 1, 2 and 3.
36  * tx_fifo_num: This defines the number of Tx FIFOs thats used int the driver.
37  * tx_fifo_len: This too is an array of 8. Each element defines the number of
38  * Tx descriptors that can be associated with each corresponding FIFO.
39  * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40  *     1(MSI), 2(MSI_X). Default value is '0(INTA)'
41  * lro: Specifies whether to enable Large Receive Offload (LRO) or not.
42  *     Possible values '1' for enable '0' for disable. Default is '0'
43  * lro_max_pkts: This parameter defines maximum number of packets can be
44  *     aggregated as a single large packet
45  ************************************************************************/
46
47 #include <linux/module.h>
48 #include <linux/types.h>
49 #include <linux/errno.h>
50 #include <linux/ioport.h>
51 #include <linux/pci.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/kernel.h>
54 #include <linux/netdevice.h>
55 #include <linux/etherdevice.h>
56 #include <linux/skbuff.h>
57 #include <linux/init.h>
58 #include <linux/delay.h>
59 #include <linux/stddef.h>
60 #include <linux/ioctl.h>
61 #include <linux/timex.h>
62 #include <linux/sched.h>
63 #include <linux/ethtool.h>
64 #include <linux/workqueue.h>
65 #include <linux/if_vlan.h>
66 #include <linux/ip.h>
67 #include <linux/tcp.h>
68 #include <net/tcp.h>
69
70 #include <asm/system.h>
71 #include <asm/uaccess.h>
72 #include <asm/io.h>
73 #include <asm/div64.h>
74
75 /* local include */
76 #include "s2io.h"
77 #include "s2io-regs.h"
78
#define DRV_VERSION "2.0.14.2"

/* S2io Driver name & version. */
static char s2io_driver_name[] = "Neterion";
static char s2io_driver_version[] = DRV_VERSION;

/* Per rxd_mode geometry tables: descriptor size in bytes and number of
 * RxDs per block, indexed by the adapter's rxd_mode (used throughout
 * init_shared_mem() and rx_buffer_level()). */
static int rxd_size[4] = {32,48,48,64};
static int rxd_count[4] = {127,85,85,63};
87
88 static inline int RXD_IS_UP2DT(RxD_t *rxdp)
89 {
90         int ret;
91
92         ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
93                 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
94
95         return ret;
96 }
97
/*
 * Cards with following subsystem_id have a link state indication
 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
 * macro below identifies these cards given the subsystem_id.
 * Arguments and the whole expansion are parenthesized so the macro
 * expands safely with expression arguments and inside larger
 * expressions.
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid)	\
	(((dev_type) == XFRAME_I_DEVICE) ?			\
		((((subid) >= 0x600B) && ((subid) <= 0x600D)) || \
		 (((subid) >= 0x640B) && ((subid) <= 0x640D)) ? 1 : 0) : 0)

/* True when neither remote nor local RMAC fault bit is set in the
 * adapter status value. */
#define LINK_IS_UP(val64) (!((val64) & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
				      ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
/* Atomically claim the tasklet; non-zero means it was already in use. */
#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
/* Rx buffer replenish urgency levels returned by rx_buffer_level(). */
#define PANIC	1
#define LOW	2
113 static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
114 {
115         mac_info_t *mac_control;
116
117         mac_control = &sp->mac_control;
118         if (rxb_size <= rxd_count[sp->rxd_mode])
119                 return PANIC;
120         else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16)
121                 return  LOW;
122         return 0;
123 }
124
/* Ethtool related variables and Macros. */
/* Names of the self-test operations reported through ethtool. */
static char s2io_gstrings[][ETH_GSTRING_LEN] = {
	"Register test\t(offline)",
	"Eeprom test\t(offline)",
	"Link test\t(online)",
	"RLDRAM test\t(offline)",
	"BIST Test\t(offline)"
};
133
134 static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
135         {"tmac_frms"},
136         {"tmac_data_octets"},
137         {"tmac_drop_frms"},
138         {"tmac_mcst_frms"},
139         {"tmac_bcst_frms"},
140         {"tmac_pause_ctrl_frms"},
141         {"tmac_ttl_octets"},
142         {"tmac_ucst_frms"},
143         {"tmac_nucst_frms"},
144         {"tmac_any_err_frms"},
145         {"tmac_ttl_less_fb_octets"},
146         {"tmac_vld_ip_octets"},
147         {"tmac_vld_ip"},
148         {"tmac_drop_ip"},
149         {"tmac_icmp"},
150         {"tmac_rst_tcp"},
151         {"tmac_tcp"},
152         {"tmac_udp"},
153         {"rmac_vld_frms"},
154         {"rmac_data_octets"},
155         {"rmac_fcs_err_frms"},
156         {"rmac_drop_frms"},
157         {"rmac_vld_mcst_frms"},
158         {"rmac_vld_bcst_frms"},
159         {"rmac_in_rng_len_err_frms"},
160         {"rmac_out_rng_len_err_frms"},
161         {"rmac_long_frms"},
162         {"rmac_pause_ctrl_frms"},
163         {"rmac_unsup_ctrl_frms"},
164         {"rmac_ttl_octets"},
165         {"rmac_accepted_ucst_frms"},
166         {"rmac_accepted_nucst_frms"},
167         {"rmac_discarded_frms"},
168         {"rmac_drop_events"},
169         {"rmac_ttl_less_fb_octets"},
170         {"rmac_ttl_frms"},
171         {"rmac_usized_frms"},
172         {"rmac_osized_frms"},
173         {"rmac_frag_frms"},
174         {"rmac_jabber_frms"},
175         {"rmac_ttl_64_frms"},
176         {"rmac_ttl_65_127_frms"},
177         {"rmac_ttl_128_255_frms"},
178         {"rmac_ttl_256_511_frms"},
179         {"rmac_ttl_512_1023_frms"},
180         {"rmac_ttl_1024_1518_frms"},
181         {"rmac_ip"},
182         {"rmac_ip_octets"},
183         {"rmac_hdr_err_ip"},
184         {"rmac_drop_ip"},
185         {"rmac_icmp"},
186         {"rmac_tcp"},
187         {"rmac_udp"},
188         {"rmac_err_drp_udp"},
189         {"rmac_xgmii_err_sym"},
190         {"rmac_frms_q0"},
191         {"rmac_frms_q1"},
192         {"rmac_frms_q2"},
193         {"rmac_frms_q3"},
194         {"rmac_frms_q4"},
195         {"rmac_frms_q5"},
196         {"rmac_frms_q6"},
197         {"rmac_frms_q7"},
198         {"rmac_full_q0"},
199         {"rmac_full_q1"},
200         {"rmac_full_q2"},
201         {"rmac_full_q3"},
202         {"rmac_full_q4"},
203         {"rmac_full_q5"},
204         {"rmac_full_q6"},
205         {"rmac_full_q7"},
206         {"rmac_pause_cnt"},
207         {"rmac_xgmii_data_err_cnt"},
208         {"rmac_xgmii_ctrl_err_cnt"},
209         {"rmac_accepted_ip"},
210         {"rmac_err_tcp"},
211         {"rd_req_cnt"},
212         {"new_rd_req_cnt"},
213         {"new_rd_req_rtry_cnt"},
214         {"rd_rtry_cnt"},
215         {"wr_rtry_rd_ack_cnt"},
216         {"wr_req_cnt"},
217         {"new_wr_req_cnt"},
218         {"new_wr_req_rtry_cnt"},
219         {"wr_rtry_cnt"},
220         {"wr_disc_cnt"},
221         {"rd_rtry_wr_ack_cnt"},
222         {"txp_wr_cnt"},
223         {"txd_rd_cnt"},
224         {"txd_wr_cnt"},
225         {"rxd_rd_cnt"},
226         {"rxd_wr_cnt"},
227         {"txf_rd_cnt"},
228         {"rxf_wr_cnt"},
229         {"rmac_ttl_1519_4095_frms"},
230         {"rmac_ttl_4096_8191_frms"},
231         {"rmac_ttl_8192_max_frms"},
232         {"rmac_ttl_gt_max_frms"},
233         {"rmac_osized_alt_frms"},
234         {"rmac_jabber_alt_frms"},
235         {"rmac_gt_max_alt_frms"},
236         {"rmac_vlan_frms"},
237         {"rmac_len_discard"},
238         {"rmac_fcs_discard"},
239         {"rmac_pf_discard"},
240         {"rmac_da_discard"},
241         {"rmac_red_discard"},
242         {"rmac_rts_discard"},
243         {"rmac_ingm_full_discard"},
244         {"link_fault_cnt"},
245         {"\n DRIVER STATISTICS"},
246         {"single_bit_ecc_errs"},
247         {"double_bit_ecc_errs"},
248         {"parity_err_cnt"},
249         {"serious_err_cnt"},
250         {"soft_reset_cnt"},
251         {"fifo_full_cnt"},
252         {"ring_full_cnt"},
253         ("alarm_transceiver_temp_high"),
254         ("alarm_transceiver_temp_low"),
255         ("alarm_laser_bias_current_high"),
256         ("alarm_laser_bias_current_low"),
257         ("alarm_laser_output_power_high"),
258         ("alarm_laser_output_power_low"),
259         ("warn_transceiver_temp_high"),
260         ("warn_transceiver_temp_low"),
261         ("warn_laser_bias_current_high"),
262         ("warn_laser_bias_current_low"),
263         ("warn_laser_output_power_high"),
264         ("warn_laser_output_power_low"),
265         ("lro_aggregated_pkts"),
266         ("lro_flush_both_count"),
267         ("lro_out_of_sequence_pkts"),
268         ("lro_flush_due_to_max_pkts"),
269         ("lro_avg_aggr_pkts"),
270 };
271
/* Number of ethtool statistics keys and total bytes of their strings.
 * Expansions are fully parenthesized so the macros compose safely in
 * larger expressions. */
#define S2IO_STAT_LEN (sizeof(ethtool_stats_keys) / ETH_GSTRING_LEN)
#define S2IO_STAT_STRINGS_LEN (S2IO_STAT_LEN * ETH_GSTRING_LEN)

/* Number of self-test names and total bytes of their strings. */
#define S2IO_TEST_LEN	(sizeof(s2io_gstrings) / ETH_GSTRING_LEN)
#define S2IO_STRINGS_LEN	(S2IO_TEST_LEN * ETH_GSTRING_LEN)

/* Initialize a timer with the given handler/argument and arm it to
 * expire `exp` jiffies from now.  Wrapped in do { } while (0) so the
 * multi-statement macro behaves as a single statement (safe inside an
 * unbraced if/else); callers terminate it with a semicolon as before. */
#define S2IO_TIMER_CONF(timer, handle, arg, exp)		\
	do {							\
		init_timer(&timer);				\
		timer.function = handle;			\
		timer.data = (unsigned long) arg;		\
		mod_timer(&timer, (jiffies + exp));		\
	} while (0)
283
284 /* Add the vlan */
285 static void s2io_vlan_rx_register(struct net_device *dev,
286                                         struct vlan_group *grp)
287 {
288         nic_t *nic = dev->priv;
289         unsigned long flags;
290
291         spin_lock_irqsave(&nic->tx_lock, flags);
292         nic->vlgrp = grp;
293         spin_unlock_irqrestore(&nic->tx_lock, flags);
294 }
295
296 /* Unregister the vlan */
297 static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
298 {
299         nic_t *nic = dev->priv;
300         unsigned long flags;
301
302         spin_lock_irqsave(&nic->tx_lock, flags);
303         if (nic->vlgrp)
304                 nic->vlgrp->vlan_devices[vid] = NULL;
305         spin_unlock_irqrestore(&nic->tx_lock, flags);
306 }
307
/*
 * Constants to be programmed into the Xena's registers, to configure
 * the XAUI.
 */

/* Terminator value for the DTX/fix-up command sequences below. */
#define END_SIGN        0x0

/* DTX configuration sequence for Xframe II (Herc) adapters:
 * alternating set-address / write-data command pairs, END_SIGN
 * terminated. */
static const u64 herc_act_dtx_cfg[] = {
	/* Set address */
	0x8000051536750000ULL, 0x80000515367500E0ULL,
	/* Write data */
	0x8000051536750004ULL, 0x80000515367500E4ULL,
	/* Set address */
	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
	/* Write data */
	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
	/* Set address */
	0x801205150D440000ULL, 0x801205150D4400E0ULL,
	/* Write data */
	0x801205150D440004ULL, 0x801205150D4400E4ULL,
	/* Set address */
	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	/* Done */
	END_SIGN
};
334
/* DTX configuration sequence for Xframe I (Xena) adapters: alternating
 * set-address / write-data command pairs, END_SIGN terminated. */
static const u64 xena_dtx_cfg[] = {
	/* Set address */
	0x8000051500000000ULL, 0x80000515000000E0ULL,
	/* Write data */
	0x80000515D9350004ULL, 0x80000515D93500E4ULL,
	/* Set address */
	0x8001051500000000ULL, 0x80010515000000E0ULL,
	/* Write data */
	0x80010515001E0004ULL, 0x80010515001E00E4ULL,
	/* Set address */
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	END_SIGN
};
350
/*
 * Constants for Fixing the MacAddress problem seen mostly on
 * Alpha machines.  END_SIGN terminated command sequence.
 */
static const u64 fix_mac[] = {
	0x0060000000000000ULL, 0x0060600000000000ULL,
	0x0040600000000000ULL, 0x0000600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0000600000000000ULL,
	0x0040600000000000ULL, 0x0060600000000000ULL,
	END_SIGN
};
372
/* Module Loadable parameters. */
/* Number of Tx FIFOs used by the driver. */
static unsigned int tx_fifo_num = 1;
/* Per-FIFO TxD count; FIFO 0 has its own default, FIFOs 1-7 share one. */
static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
    {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
/* Number of Rx rings used by the driver. */
static unsigned int rx_ring_num = 1;
/* Number of receive blocks per ring. */
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
/* Per-ring receive-steering frame length; 0 presumably means
 * "use default" — confirm against where rts_frm_len is consumed. */
static unsigned int rts_frm_len[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = 0 };
/* Rx descriptor mode for all rings; valid values are 1, 2 and 3
 * (see the header comment at the top of this file). */
static unsigned int rx_ring_mode = 1;
static unsigned int use_continuous_tx_intrs = 1;
/* Pause time value programmed for MAC flow control. */
static unsigned int rmac_pause_time = 0x100;
static unsigned int mc_pause_threshold_q0q3 = 187;
static unsigned int mc_pause_threshold_q4q7 = 187;
static unsigned int shared_splits;
static unsigned int tmac_util_period = 5;
static unsigned int rmac_util_period = 5;
static unsigned int bimodal = 0;
static unsigned int l3l4hdr_size = 128;
#ifndef CONFIG_S2IO_NAPI
static unsigned int indicate_max_pkts;
#endif
/* Frequency of Rx desc syncs expressed as power of 2 */
static unsigned int rxsync_frequency = 3;
/* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */
static unsigned int intr_type = 0;
/* Large receive offload feature */
static unsigned int lro = 0;
/* Max pkts to be aggregated by LRO at one time. If not specified,
 * aggregation happens until we hit max IP pkt size(64K)
 */
static unsigned int lro_max_pkts = 0xFFFF;
405
/*
 * S2IO device table.
 * This table lists all the devices that this driver supports.
 * Covers the S2IO (Xframe I) and Herc (Xframe II) WIN/UNI device IDs.
 */
static struct pci_device_id s2io_tbl[] __devinitdata = {
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{0,}	/* terminating entry */
};

MODULE_DEVICE_TABLE(pci, s2io_tbl);
423
/* PCI driver glue: probe/remove entry points registered with the
 * PCI core for the devices listed in s2io_tbl. */
static struct pci_driver s2io_driver = {
      .name = "S2IO",
      .id_table = s2io_tbl,
      .probe = s2io_init_nic,
      .remove = __devexit_p(s2io_rem_nic),
};
430
/* A simplifier macro used both by init and free shared_mem Fns(). */
/* Number of whole pages needed to hold `len` items at `per_each` items
 * per page (ceiling division).  Arguments are fully parenthesized so
 * the macro expands correctly with expression arguments. */
#define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
433
/**
 * init_shared_mem - Allocation and Initialization of Memory
 * @nic: Device private variable.
 * Description: The function allocates all the memory areas shared
 * between the NIC and the driver. This includes Tx descriptors,
 * Rx descriptors and the statistics block.
 * Return: SUCCESS on success, FAILURE for invalid configuration,
 * -ENOMEM on allocation failure.  On failure the caller is expected
 * to call free_shared_mem() to release anything allocated so far.
 */

static int init_shared_mem(struct s2io_nic *nic)
{
	u32 size;
	void *tmp_v_addr, *tmp_v_addr_next;
	dma_addr_t tmp_p_addr, tmp_p_addr_next;
	RxD_block_t *pre_rxd_blk = NULL;
	int i, j, blk_cnt, rx_sz, tx_sz;
	int lst_size, lst_per_page;
	struct net_device *dev = nic->dev;
	unsigned long tmp;
	buffAdd_t *ba;

	mac_info_t *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;


	/* Allocation and initialization of TXDLs in FIFOs */
	/* size = total number of TxDs requested across all FIFOs. */
	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		size += config->tx_cfg[i].fifo_len;
	}
	if (size > MAX_AVAILABLE_TXDS) {
		DBG_PRINT(ERR_DBG, "%s: Requested TxDs too high, ",
			  __FUNCTION__);
		DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
		return FAILURE;
	}

	/* One TxD list (TXDL) is max_txds descriptors; lists are packed
	 * whole into pages, never split across a page boundary. */
	lst_size = (sizeof(TxD_t) * config->max_txds);
	tx_sz = lst_size * size;
	lst_per_page = PAGE_SIZE / lst_size;

	/* Per-FIFO bookkeeping array (one list_info_hold_t per TxDL). */
	for (i = 0; i < config->tx_fifo_num; i++) {
		int fifo_len = config->tx_cfg[i].fifo_len;
		int list_holder_size = fifo_len * sizeof(list_info_hold_t);
		mac_control->fifos[i].list_info = kmalloc(list_holder_size,
							  GFP_KERNEL);
		if (!mac_control->fifos[i].list_info) {
			DBG_PRINT(ERR_DBG,
				  "Malloc failed for list_info\n");
			return -ENOMEM;
		}
		memset(mac_control->fifos[i].list_info, 0, list_holder_size);
	}
	/* Allocate the DMA-coherent pages holding the TXDLs and record
	 * each list's virtual/bus address in list_info. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
						lst_per_page);
		mac_control->fifos[i].tx_curr_put_info.offset = 0;
		mac_control->fifos[i].tx_curr_put_info.fifo_len =
		    config->tx_cfg[i].fifo_len - 1;
		mac_control->fifos[i].tx_curr_get_info.offset = 0;
		mac_control->fifos[i].tx_curr_get_info.fifo_len =
		    config->tx_cfg[i].fifo_len - 1;
		mac_control->fifos[i].fifo_no = i;
		mac_control->fifos[i].nic = nic;
		mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;

		for (j = 0; j < page_num; j++) {
			int k = 0;
			dma_addr_t tmp_p;
			void *tmp_v;
			tmp_v = pci_alloc_consistent(nic->pdev,
						     PAGE_SIZE, &tmp_p);
			if (!tmp_v) {
				DBG_PRINT(ERR_DBG,
					  "pci_alloc_consistent ");
				DBG_PRINT(ERR_DBG, "failed for TxDL\n");
				return -ENOMEM;
			}
			/* If we got a zero DMA address(can happen on
			 * certain platforms like PPC), reallocate.
			 * Store virtual address of page we don't want,
			 * to be freed later.
			 */
			if (!tmp_p) {
				mac_control->zerodma_virt_addr = tmp_v;
				DBG_PRINT(INIT_DBG, 
				"%s: Zero DMA address for TxDL. ", dev->name);
				DBG_PRINT(INIT_DBG, 
				"Virtual address %p\n", tmp_v);
				tmp_v = pci_alloc_consistent(nic->pdev,
						     PAGE_SIZE, &tmp_p);
				if (!tmp_v) {
					DBG_PRINT(ERR_DBG,
					  "pci_alloc_consistent ");
					DBG_PRINT(ERR_DBG, "failed for TxDL\n");
					return -ENOMEM;
				}
			}
			/* Carve the page into lst_per_page TXDLs. */
			while (k < lst_per_page) {
				int l = (j * lst_per_page) + k;
				if (l == config->tx_cfg[i].fifo_len)
					break;
				mac_control->fifos[i].list_info[l].list_virt_addr =
				    tmp_v + (k * lst_size);
				mac_control->fifos[i].list_info[l].list_phy_addr =
				    tmp_p + (k * lst_size);
				k++;
			}
		}
	}

	/* Scratch area used by the UFO path; one u64 per TxD. */
	nic->ufo_in_band_v = kmalloc((sizeof(u64) * size), GFP_KERNEL);
	if (!nic->ufo_in_band_v)
		return -ENOMEM;

	/* Allocation and initialization of RXDs in Rings */
	size = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		/* Each Rx block holds rxd_count[mode] usable RxDs plus
		 * one used to link to the next block, so num_rxd must be
		 * a multiple of (rxd_count + 1). */
		if (config->rx_cfg[i].num_rxd %
		    (rxd_count[nic->rxd_mode] + 1)) {
			DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
			DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
				  i);
			DBG_PRINT(ERR_DBG, "RxDs per Block");
			return FAILURE;
		}
		size += config->rx_cfg[i].num_rxd;
		mac_control->rings[i].block_count =
			config->rx_cfg[i].num_rxd /
			(rxd_count[nic->rxd_mode] + 1 );
		mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
			mac_control->rings[i].block_count;
	}
	if (nic->rxd_mode == RXD_MODE_1)
		size = (size * (sizeof(RxD1_t)));
	else
		size = (size * (sizeof(RxD3_t)));
	rx_sz = size;

	for (i = 0; i < config->rx_ring_num; i++) {
		mac_control->rings[i].rx_curr_get_info.block_index = 0;
		mac_control->rings[i].rx_curr_get_info.offset = 0;
		mac_control->rings[i].rx_curr_get_info.ring_len =
		    config->rx_cfg[i].num_rxd - 1;
		mac_control->rings[i].rx_curr_put_info.block_index = 0;
		mac_control->rings[i].rx_curr_put_info.offset = 0;
		mac_control->rings[i].rx_curr_put_info.ring_len =
		    config->rx_cfg[i].num_rxd - 1;
		mac_control->rings[i].nic = nic;
		mac_control->rings[i].ring_no = i;

		blk_cnt = config->rx_cfg[i].num_rxd /
				(rxd_count[nic->rxd_mode] + 1);
		/*  Allocating all the Rx blocks */
		for (j = 0; j < blk_cnt; j++) {
			rx_block_info_t *rx_blocks;
			int l;

			rx_blocks = &mac_control->rings[i].rx_blocks[j];
			size = SIZE_OF_BLOCK; /* size is always page size */
			tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
							  &tmp_p_addr);
			if (tmp_v_addr == NULL) {
				/*
				 * In case of failure, free_shared_mem()
				 * is called, which should free any
				 * memory that was alloced till the
				 * failure happened.
				 */
				rx_blocks->block_virt_addr = tmp_v_addr;
				return -ENOMEM;
			}
			memset(tmp_v_addr, 0, size);
			rx_blocks->block_virt_addr = tmp_v_addr;
			rx_blocks->block_dma_addr = tmp_p_addr;
			rx_blocks->rxds = kmalloc(sizeof(rxd_info_t)*
						  rxd_count[nic->rxd_mode],
						  GFP_KERNEL);
			/* NOTE(review): rx_blocks->rxds is dereferenced
			 * below without a NULL check — confirm kmalloc
			 * failure handling is intended to be elsewhere. */
			for (l=0; l<rxd_count[nic->rxd_mode];l++) {
				rx_blocks->rxds[l].virt_addr =
					rx_blocks->block_virt_addr +
					(rxd_size[nic->rxd_mode] * l);
				rx_blocks->rxds[l].dma_addr =
					rx_blocks->block_dma_addr +
					(rxd_size[nic->rxd_mode] * l);
			}
		}
		/* Interlinking all Rx Blocks (circularly: the last block
		 * points back to block 0 via the (j + 1) % blk_cnt index). */
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr =
				mac_control->rings[i].rx_blocks[j].block_virt_addr;
			tmp_v_addr_next =
				mac_control->rings[i].rx_blocks[(j + 1) %
					      blk_cnt].block_virt_addr;
			tmp_p_addr =
				mac_control->rings[i].rx_blocks[j].block_dma_addr;
			tmp_p_addr_next =
				mac_control->rings[i].rx_blocks[(j + 1) %
					      blk_cnt].block_dma_addr;

			pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
			pre_rxd_blk->reserved_2_pNext_RxD_block =
			    (unsigned long) tmp_v_addr_next;
			pre_rxd_blk->pNext_RxD_Blk_physical =
			    (u64) tmp_p_addr_next;
		}
	}
	if (nic->rxd_mode >= RXD_MODE_3A) {
		/*
		 * Allocation of Storages for buffer addresses in 2BUFF mode
		 * and the buffers as well.
		 */
		for (i = 0; i < config->rx_ring_num; i++) {
			blk_cnt = config->rx_cfg[i].num_rxd /
			   (rxd_count[nic->rxd_mode]+ 1);
			mac_control->rings[i].ba =
				kmalloc((sizeof(buffAdd_t *) * blk_cnt),
				     GFP_KERNEL);
			if (!mac_control->rings[i].ba)
				return -ENOMEM;
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;
				mac_control->rings[i].ba[j] =
					kmalloc((sizeof(buffAdd_t) *
						(rxd_count[nic->rxd_mode] + 1)),
						GFP_KERNEL);
				if (!mac_control->rings[i].ba[j])
					return -ENOMEM;
				while (k != rxd_count[nic->rxd_mode]) {
					ba = &mac_control->rings[i].ba[j][k];

					/* Over-allocate by ALIGN_SIZE and
					 * round the working pointer up to
					 * an ALIGN_SIZE boundary; the
					 * original pointer is kept in
					 * ba_*_org for kfree(). */
					ba->ba_0_org = (void *) kmalloc
					    (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
					if (!ba->ba_0_org)
						return -ENOMEM;
					tmp = (unsigned long)ba->ba_0_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long) ALIGN_SIZE);
					ba->ba_0 = (void *) tmp;

					ba->ba_1_org = (void *) kmalloc
					    (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
					if (!ba->ba_1_org)
						return -ENOMEM;
					tmp = (unsigned long) ba->ba_1_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long) ALIGN_SIZE);
					ba->ba_1 = (void *) tmp;
					k++;
				}
			}
		}
	}

	/* Allocation and initialization of Statistics block */
	size = sizeof(StatInfo_t);
	mac_control->stats_mem = pci_alloc_consistent
	    (nic->pdev, size, &mac_control->stats_mem_phy);

	if (!mac_control->stats_mem) {
		/*
		 * In case of failure, free_shared_mem() is called, which
		 * should free any memory that was alloced till the
		 * failure happened.
		 */
		return -ENOMEM;
	}
	mac_control->stats_mem_sz = size;

	tmp_v_addr = mac_control->stats_mem;
	mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
	memset(tmp_v_addr, 0, size);
	DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
		  (unsigned long long) tmp_p_addr);

	return SUCCESS;
}
713
714 /**
715  * free_shared_mem - Free the allocated Memory
716  * @nic:  Device private variable.
717  * Description: This function is to free all memory locations allocated by
718  * the init_shared_mem() function and return it to the kernel.
719  */
720
721 static void free_shared_mem(struct s2io_nic *nic)
722 {
723         int i, j, blk_cnt, size;
724         void *tmp_v_addr;
725         dma_addr_t tmp_p_addr;
726         mac_info_t *mac_control;
727         struct config_param *config;
728         int lst_size, lst_per_page;
729         struct net_device *dev = nic->dev;
730
731         if (!nic)
732                 return;
733
734         mac_control = &nic->mac_control;
735         config = &nic->config;
736
737         lst_size = (sizeof(TxD_t) * config->max_txds);
738         lst_per_page = PAGE_SIZE / lst_size;
739
740         for (i = 0; i < config->tx_fifo_num; i++) {
741                 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
742                                                 lst_per_page);
743                 for (j = 0; j < page_num; j++) {
744                         int mem_blks = (j * lst_per_page);
745                         if (!mac_control->fifos[i].list_info)
746                                 return; 
747                         if (!mac_control->fifos[i].list_info[mem_blks].
748                                  list_virt_addr)
749                                 break;
750                         pci_free_consistent(nic->pdev, PAGE_SIZE,
751                                             mac_control->fifos[i].
752                                             list_info[mem_blks].
753                                             list_virt_addr,
754                                             mac_control->fifos[i].
755                                             list_info[mem_blks].
756                                             list_phy_addr);
757                 }
758                 /* If we got a zero DMA address during allocation,
759                  * free the page now
760                  */
761                 if (mac_control->zerodma_virt_addr) {
762                         pci_free_consistent(nic->pdev, PAGE_SIZE,
763                                             mac_control->zerodma_virt_addr,
764                                             (dma_addr_t)0);
765                         DBG_PRINT(INIT_DBG, 
766                                 "%s: Freeing TxDL with zero DMA addr. ",
767                                 dev->name);
768                         DBG_PRINT(INIT_DBG, "Virtual address %p\n",
769                                 mac_control->zerodma_virt_addr);
770                 }
771                 kfree(mac_control->fifos[i].list_info);
772         }
773
774         size = SIZE_OF_BLOCK;
775         for (i = 0; i < config->rx_ring_num; i++) {
776                 blk_cnt = mac_control->rings[i].block_count;
777                 for (j = 0; j < blk_cnt; j++) {
778                         tmp_v_addr = mac_control->rings[i].rx_blocks[j].
779                                 block_virt_addr;
780                         tmp_p_addr = mac_control->rings[i].rx_blocks[j].
781                                 block_dma_addr;
782                         if (tmp_v_addr == NULL)
783                                 break;
784                         pci_free_consistent(nic->pdev, size,
785                                             tmp_v_addr, tmp_p_addr);
786                         kfree(mac_control->rings[i].rx_blocks[j].rxds);
787                 }
788         }
789
790         if (nic->rxd_mode >= RXD_MODE_3A) {
791                 /* Freeing buffer storage addresses in 2BUFF mode. */
792                 for (i = 0; i < config->rx_ring_num; i++) {
793                         blk_cnt = config->rx_cfg[i].num_rxd /
794                             (rxd_count[nic->rxd_mode] + 1);
795                         for (j = 0; j < blk_cnt; j++) {
796                                 int k = 0;
797                                 if (!mac_control->rings[i].ba[j])
798                                         continue;
799                                 while (k != rxd_count[nic->rxd_mode]) {
800                                         buffAdd_t *ba =
801                                                 &mac_control->rings[i].ba[j][k];
802                                         kfree(ba->ba_0_org);
803                                         kfree(ba->ba_1_org);
804                                         k++;
805                                 }
806                                 kfree(mac_control->rings[i].ba[j]);
807                         }
808                         kfree(mac_control->rings[i].ba);
809                 }
810         }
811
812         if (mac_control->stats_mem) {
813                 pci_free_consistent(nic->pdev,
814                                     mac_control->stats_mem_sz,
815                                     mac_control->stats_mem,
816                                     mac_control->stats_mem_phy);
817         }
818         if (nic->ufo_in_band_v)
819                 kfree(nic->ufo_in_band_v);
820 }
821
822 /**
823  * s2io_verify_pci_mode -
824  */
825
826 static int s2io_verify_pci_mode(nic_t *nic)
827 {
828         XENA_dev_config_t __iomem *bar0 = nic->bar0;
829         register u64 val64 = 0;
830         int     mode;
831
832         val64 = readq(&bar0->pci_mode);
833         mode = (u8)GET_PCI_MODE(val64);
834
835         if ( val64 & PCI_MODE_UNKNOWN_MODE)
836                 return -1;      /* Unknown PCI mode */
837         return mode;
838 }
839
840 #define NEC_VENID   0x1033
841 #define NEC_DEVID   0x0125
842 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
843 {
844         struct pci_dev *tdev = NULL;
845         while ((tdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
846                 if ((tdev->vendor == NEC_VENID) && (tdev->device == NEC_DEVID)){
847                         if (tdev->bus == s2io_pdev->bus->parent)
848                                 return 1;
849                 }
850         }
851         return 0;
852 }
853
/* Effective bus speed in MHz, indexed by the PCI mode read from the
 * pci_mode register (see s2io_print_pci_mode, which does
 * config->bus_speed = bus_speed[mode]).
 */
static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
855 /**
856  * s2io_print_pci_mode -
857  */
858 static int s2io_print_pci_mode(nic_t *nic)
859 {
860         XENA_dev_config_t __iomem *bar0 = nic->bar0;
861         register u64 val64 = 0;
862         int     mode;
863         struct config_param *config = &nic->config;
864
865         val64 = readq(&bar0->pci_mode);
866         mode = (u8)GET_PCI_MODE(val64);
867
868         if ( val64 & PCI_MODE_UNKNOWN_MODE)
869                 return -1;      /* Unknown PCI mode */
870
871         config->bus_speed = bus_speed[mode];
872
873         if (s2io_on_nec_bridge(nic->pdev)) {
874                 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
875                                                         nic->dev->name);
876                 return mode;
877         }
878
879         if (val64 & PCI_MODE_32_BITS) {
880                 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
881         } else {
882                 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
883         }
884
885         switch(mode) {
886                 case PCI_MODE_PCI_33:
887                         DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
888                         break;
889                 case PCI_MODE_PCI_66:
890                         DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
891                         break;
892                 case PCI_MODE_PCIX_M1_66:
893                         DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
894                         break;
895                 case PCI_MODE_PCIX_M1_100:
896                         DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
897                         break;
898                 case PCI_MODE_PCIX_M1_133:
899                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
900                         break;
901                 case PCI_MODE_PCIX_M2_66:
902                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
903                         break;
904                 case PCI_MODE_PCIX_M2_100:
905                         DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
906                         break;
907                 case PCI_MODE_PCIX_M2_133:
908                         DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
909                         break;
910                 default:
911                         return -1;      /* Unsupported bus speed */
912         }
913
914         return mode;
915 }
916
917 /**
918  *  init_nic - Initialization of hardware
919  *  @nic: device peivate variable
920  *  Description: The function sequentially configures every block
921  *  of the H/W from their reset values.
922  *  Return Value:  SUCCESS on success and
923  *  '-1' on failure (endian settings incorrect).
924  */
925
926 static int init_nic(struct s2io_nic *nic)
927 {
928         XENA_dev_config_t __iomem *bar0 = nic->bar0;
929         struct net_device *dev = nic->dev;
930         register u64 val64 = 0;
931         void __iomem *add;
932         u32 time;
933         int i, j;
934         mac_info_t *mac_control;
935         struct config_param *config;
936         int dtx_cnt = 0;
937         unsigned long long mem_share;
938         int mem_size;
939
940         mac_control = &nic->mac_control;
941         config = &nic->config;
942
943         /* to set the swapper controle on the card */
944         if(s2io_set_swapper(nic)) {
945                 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
946                 return -1;
947         }
948
949         /*
950          * Herc requires EOI to be removed from reset before XGXS, so..
951          */
952         if (nic->device_type & XFRAME_II_DEVICE) {
953                 val64 = 0xA500000000ULL;
954                 writeq(val64, &bar0->sw_reset);
955                 msleep(500);
956                 val64 = readq(&bar0->sw_reset);
957         }
958
959         /* Remove XGXS from reset state */
960         val64 = 0;
961         writeq(val64, &bar0->sw_reset);
962         msleep(500);
963         val64 = readq(&bar0->sw_reset);
964
965         /*  Enable Receiving broadcasts */
966         add = &bar0->mac_cfg;
967         val64 = readq(&bar0->mac_cfg);
968         val64 |= MAC_RMAC_BCAST_ENABLE;
969         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
970         writel((u32) val64, add);
971         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
972         writel((u32) (val64 >> 32), (add + 4));
973
974         /* Read registers in all blocks */
975         val64 = readq(&bar0->mac_int_mask);
976         val64 = readq(&bar0->mc_int_mask);
977         val64 = readq(&bar0->xgxs_int_mask);
978
979         /*  Set MTU */
980         val64 = dev->mtu;
981         writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
982
983         if (nic->device_type & XFRAME_II_DEVICE) {
984                 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
985                         SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
986                                           &bar0->dtx_control, UF);
987                         if (dtx_cnt & 0x1)
988                                 msleep(1); /* Necessary!! */
989                         dtx_cnt++;
990                 }
991         } else {
992                 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
993                         SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
994                                           &bar0->dtx_control, UF);
995                         val64 = readq(&bar0->dtx_control);
996                         dtx_cnt++;
997                 }
998         }
999
1000         /*  Tx DMA Initialization */
1001         val64 = 0;
1002         writeq(val64, &bar0->tx_fifo_partition_0);
1003         writeq(val64, &bar0->tx_fifo_partition_1);
1004         writeq(val64, &bar0->tx_fifo_partition_2);
1005         writeq(val64, &bar0->tx_fifo_partition_3);
1006
1007
1008         for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1009                 val64 |=
1010                     vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
1011                          13) | vBIT(config->tx_cfg[i].fifo_priority,
1012                                     ((i * 32) + 5), 3);
1013
1014                 if (i == (config->tx_fifo_num - 1)) {
1015                         if (i % 2 == 0)
1016                                 i++;
1017                 }
1018
1019                 switch (i) {
1020                 case 1:
1021                         writeq(val64, &bar0->tx_fifo_partition_0);
1022                         val64 = 0;
1023                         break;
1024                 case 3:
1025                         writeq(val64, &bar0->tx_fifo_partition_1);
1026                         val64 = 0;
1027                         break;
1028                 case 5:
1029                         writeq(val64, &bar0->tx_fifo_partition_2);
1030                         val64 = 0;
1031                         break;
1032                 case 7:
1033                         writeq(val64, &bar0->tx_fifo_partition_3);
1034                         break;
1035                 }
1036         }
1037
1038         /*
1039          * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1040          * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1041          */
1042         if ((nic->device_type == XFRAME_I_DEVICE) &&
1043                 (get_xena_rev_id(nic->pdev) < 4))
1044                 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1045
1046         val64 = readq(&bar0->tx_fifo_partition_0);
1047         DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1048                   &bar0->tx_fifo_partition_0, (unsigned long long) val64);
1049
1050         /*
1051          * Initialization of Tx_PA_CONFIG register to ignore packet
1052          * integrity checking.
1053          */
1054         val64 = readq(&bar0->tx_pa_cfg);
1055         val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
1056             TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
1057         writeq(val64, &bar0->tx_pa_cfg);
1058
1059         /* Rx DMA intialization. */
1060         val64 = 0;
1061         for (i = 0; i < config->rx_ring_num; i++) {
1062                 val64 |=
1063                     vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
1064                          3);
1065         }
1066         writeq(val64, &bar0->rx_queue_priority);
1067
1068         /*
1069          * Allocating equal share of memory to all the
1070          * configured Rings.
1071          */
1072         val64 = 0;
1073         if (nic->device_type & XFRAME_II_DEVICE)
1074                 mem_size = 32;
1075         else
1076                 mem_size = 64;
1077
1078         for (i = 0; i < config->rx_ring_num; i++) {
1079                 switch (i) {
1080                 case 0:
1081                         mem_share = (mem_size / config->rx_ring_num +
1082                                      mem_size % config->rx_ring_num);
1083                         val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1084                         continue;
1085                 case 1:
1086                         mem_share = (mem_size / config->rx_ring_num);
1087                         val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1088                         continue;
1089                 case 2:
1090                         mem_share = (mem_size / config->rx_ring_num);
1091                         val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1092                         continue;
1093                 case 3:
1094                         mem_share = (mem_size / config->rx_ring_num);
1095                         val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1096                         continue;
1097                 case 4:
1098                         mem_share = (mem_size / config->rx_ring_num);
1099                         val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1100                         continue;
1101                 case 5:
1102                         mem_share = (mem_size / config->rx_ring_num);
1103                         val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1104                         continue;
1105                 case 6:
1106                         mem_share = (mem_size / config->rx_ring_num);
1107                         val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1108                         continue;
1109                 case 7:
1110                         mem_share = (mem_size / config->rx_ring_num);
1111                         val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1112                         continue;
1113                 }
1114         }
1115         writeq(val64, &bar0->rx_queue_cfg);
1116
1117         /*
1118          * Filling Tx round robin registers
1119          * as per the number of FIFOs
1120          */
1121         switch (config->tx_fifo_num) {
1122         case 1:
1123                 val64 = 0x0000000000000000ULL;
1124                 writeq(val64, &bar0->tx_w_round_robin_0);
1125                 writeq(val64, &bar0->tx_w_round_robin_1);
1126                 writeq(val64, &bar0->tx_w_round_robin_2);
1127                 writeq(val64, &bar0->tx_w_round_robin_3);
1128                 writeq(val64, &bar0->tx_w_round_robin_4);
1129                 break;
1130         case 2:
1131                 val64 = 0x0000010000010000ULL;
1132                 writeq(val64, &bar0->tx_w_round_robin_0);
1133                 val64 = 0x0100000100000100ULL;
1134                 writeq(val64, &bar0->tx_w_round_robin_1);
1135                 val64 = 0x0001000001000001ULL;
1136                 writeq(val64, &bar0->tx_w_round_robin_2);
1137                 val64 = 0x0000010000010000ULL;
1138                 writeq(val64, &bar0->tx_w_round_robin_3);
1139                 val64 = 0x0100000000000000ULL;
1140                 writeq(val64, &bar0->tx_w_round_robin_4);
1141                 break;
1142         case 3:
1143                 val64 = 0x0001000102000001ULL;
1144                 writeq(val64, &bar0->tx_w_round_robin_0);
1145                 val64 = 0x0001020000010001ULL;
1146                 writeq(val64, &bar0->tx_w_round_robin_1);
1147                 val64 = 0x0200000100010200ULL;
1148                 writeq(val64, &bar0->tx_w_round_robin_2);
1149                 val64 = 0x0001000102000001ULL;
1150                 writeq(val64, &bar0->tx_w_round_robin_3);
1151                 val64 = 0x0001020000000000ULL;
1152                 writeq(val64, &bar0->tx_w_round_robin_4);
1153                 break;
1154         case 4:
1155                 val64 = 0x0001020300010200ULL;
1156                 writeq(val64, &bar0->tx_w_round_robin_0);
1157                 val64 = 0x0100000102030001ULL;
1158                 writeq(val64, &bar0->tx_w_round_robin_1);
1159                 val64 = 0x0200010000010203ULL;
1160                 writeq(val64, &bar0->tx_w_round_robin_2);
1161                 val64 = 0x0001020001000001ULL;
1162                 writeq(val64, &bar0->tx_w_round_robin_3);
1163                 val64 = 0x0203000100000000ULL;
1164                 writeq(val64, &bar0->tx_w_round_robin_4);
1165                 break;
1166         case 5:
1167                 val64 = 0x0001000203000102ULL;
1168                 writeq(val64, &bar0->tx_w_round_robin_0);
1169                 val64 = 0x0001020001030004ULL;
1170                 writeq(val64, &bar0->tx_w_round_robin_1);
1171                 val64 = 0x0001000203000102ULL;
1172                 writeq(val64, &bar0->tx_w_round_robin_2);
1173                 val64 = 0x0001020001030004ULL;
1174                 writeq(val64, &bar0->tx_w_round_robin_3);
1175                 val64 = 0x0001000000000000ULL;
1176                 writeq(val64, &bar0->tx_w_round_robin_4);
1177                 break;
1178         case 6:
1179                 val64 = 0x0001020304000102ULL;
1180                 writeq(val64, &bar0->tx_w_round_robin_0);
1181                 val64 = 0x0304050001020001ULL;
1182                 writeq(val64, &bar0->tx_w_round_robin_1);
1183                 val64 = 0x0203000100000102ULL;
1184                 writeq(val64, &bar0->tx_w_round_robin_2);
1185                 val64 = 0x0304000102030405ULL;
1186                 writeq(val64, &bar0->tx_w_round_robin_3);
1187                 val64 = 0x0001000200000000ULL;
1188                 writeq(val64, &bar0->tx_w_round_robin_4);
1189                 break;
1190         case 7:
1191                 val64 = 0x0001020001020300ULL;
1192                 writeq(val64, &bar0->tx_w_round_robin_0);
1193                 val64 = 0x0102030400010203ULL;
1194                 writeq(val64, &bar0->tx_w_round_robin_1);
1195                 val64 = 0x0405060001020001ULL;
1196                 writeq(val64, &bar0->tx_w_round_robin_2);
1197                 val64 = 0x0304050000010200ULL;
1198                 writeq(val64, &bar0->tx_w_round_robin_3);
1199                 val64 = 0x0102030000000000ULL;
1200                 writeq(val64, &bar0->tx_w_round_robin_4);
1201                 break;
1202         case 8:
1203                 val64 = 0x0001020300040105ULL;
1204                 writeq(val64, &bar0->tx_w_round_robin_0);
1205                 val64 = 0x0200030106000204ULL;
1206                 writeq(val64, &bar0->tx_w_round_robin_1);
1207                 val64 = 0x0103000502010007ULL;
1208                 writeq(val64, &bar0->tx_w_round_robin_2);
1209                 val64 = 0x0304010002060500ULL;
1210                 writeq(val64, &bar0->tx_w_round_robin_3);
1211                 val64 = 0x0103020400000000ULL;
1212                 writeq(val64, &bar0->tx_w_round_robin_4);
1213                 break;
1214         }
1215
1216         /* Enable Tx FIFO partition 0. */
1217         val64 = readq(&bar0->tx_fifo_partition_0);
1218         val64 |= (TX_FIFO_PARTITION_EN);
1219         writeq(val64, &bar0->tx_fifo_partition_0);
1220
1221         /* Filling the Rx round robin registers as per the
1222          * number of Rings and steering based on QoS.
1223          */
1224         switch (config->rx_ring_num) {
1225         case 1:
1226                 val64 = 0x8080808080808080ULL;
1227                 writeq(val64, &bar0->rts_qos_steering);
1228                 break;
1229         case 2:
1230                 val64 = 0x0000010000010000ULL;
1231                 writeq(val64, &bar0->rx_w_round_robin_0);
1232                 val64 = 0x0100000100000100ULL;
1233                 writeq(val64, &bar0->rx_w_round_robin_1);
1234                 val64 = 0x0001000001000001ULL;
1235                 writeq(val64, &bar0->rx_w_round_robin_2);
1236                 val64 = 0x0000010000010000ULL;
1237                 writeq(val64, &bar0->rx_w_round_robin_3);
1238                 val64 = 0x0100000000000000ULL;
1239                 writeq(val64, &bar0->rx_w_round_robin_4);
1240
1241                 val64 = 0x8080808040404040ULL;
1242                 writeq(val64, &bar0->rts_qos_steering);
1243                 break;
1244         case 3:
1245                 val64 = 0x0001000102000001ULL;
1246                 writeq(val64, &bar0->rx_w_round_robin_0);
1247                 val64 = 0x0001020000010001ULL;
1248                 writeq(val64, &bar0->rx_w_round_robin_1);
1249                 val64 = 0x0200000100010200ULL;
1250                 writeq(val64, &bar0->rx_w_round_robin_2);
1251                 val64 = 0x0001000102000001ULL;
1252                 writeq(val64, &bar0->rx_w_round_robin_3);
1253                 val64 = 0x0001020000000000ULL;
1254                 writeq(val64, &bar0->rx_w_round_robin_4);
1255
1256                 val64 = 0x8080804040402020ULL;
1257                 writeq(val64, &bar0->rts_qos_steering);
1258                 break;
1259         case 4:
1260                 val64 = 0x0001020300010200ULL;
1261                 writeq(val64, &bar0->rx_w_round_robin_0);
1262                 val64 = 0x0100000102030001ULL;
1263                 writeq(val64, &bar0->rx_w_round_robin_1);
1264                 val64 = 0x0200010000010203ULL;
1265                 writeq(val64, &bar0->rx_w_round_robin_2);
1266                 val64 = 0x0001020001000001ULL;  
1267                 writeq(val64, &bar0->rx_w_round_robin_3);
1268                 val64 = 0x0203000100000000ULL;
1269                 writeq(val64, &bar0->rx_w_round_robin_4);
1270
1271                 val64 = 0x8080404020201010ULL;
1272                 writeq(val64, &bar0->rts_qos_steering);
1273                 break;
1274         case 5:
1275                 val64 = 0x0001000203000102ULL;
1276                 writeq(val64, &bar0->rx_w_round_robin_0);
1277                 val64 = 0x0001020001030004ULL;
1278                 writeq(val64, &bar0->rx_w_round_robin_1);
1279                 val64 = 0x0001000203000102ULL;
1280                 writeq(val64, &bar0->rx_w_round_robin_2);
1281                 val64 = 0x0001020001030004ULL;
1282                 writeq(val64, &bar0->rx_w_round_robin_3);
1283                 val64 = 0x0001000000000000ULL;
1284                 writeq(val64, &bar0->rx_w_round_robin_4);
1285
1286                 val64 = 0x8080404020201008ULL;
1287                 writeq(val64, &bar0->rts_qos_steering);
1288                 break;
1289         case 6:
1290                 val64 = 0x0001020304000102ULL;
1291                 writeq(val64, &bar0->rx_w_round_robin_0);
1292                 val64 = 0x0304050001020001ULL;
1293                 writeq(val64, &bar0->rx_w_round_robin_1);
1294                 val64 = 0x0203000100000102ULL;
1295                 writeq(val64, &bar0->rx_w_round_robin_2);
1296                 val64 = 0x0304000102030405ULL;
1297                 writeq(val64, &bar0->rx_w_round_robin_3);
1298                 val64 = 0x0001000200000000ULL;
1299                 writeq(val64, &bar0->rx_w_round_robin_4);
1300
1301                 val64 = 0x8080404020100804ULL;
1302                 writeq(val64, &bar0->rts_qos_steering);
1303                 break;
1304         case 7:
1305                 val64 = 0x0001020001020300ULL;
1306                 writeq(val64, &bar0->rx_w_round_robin_0);
1307                 val64 = 0x0102030400010203ULL;
1308                 writeq(val64, &bar0->rx_w_round_robin_1);
1309                 val64 = 0x0405060001020001ULL;
1310                 writeq(val64, &bar0->rx_w_round_robin_2);
1311                 val64 = 0x0304050000010200ULL;
1312                 writeq(val64, &bar0->rx_w_round_robin_3);
1313                 val64 = 0x0102030000000000ULL;
1314                 writeq(val64, &bar0->rx_w_round_robin_4);
1315
1316                 val64 = 0x8080402010080402ULL;
1317                 writeq(val64, &bar0->rts_qos_steering);
1318                 break;
1319         case 8:
1320                 val64 = 0x0001020300040105ULL;
1321                 writeq(val64, &bar0->rx_w_round_robin_0);
1322                 val64 = 0x0200030106000204ULL;
1323                 writeq(val64, &bar0->rx_w_round_robin_1);
1324                 val64 = 0x0103000502010007ULL;
1325                 writeq(val64, &bar0->rx_w_round_robin_2);
1326                 val64 = 0x0304010002060500ULL;
1327                 writeq(val64, &bar0->rx_w_round_robin_3);
1328                 val64 = 0x0103020400000000ULL;
1329                 writeq(val64, &bar0->rx_w_round_robin_4);
1330
1331                 val64 = 0x8040201008040201ULL;
1332                 writeq(val64, &bar0->rts_qos_steering);
1333                 break;
1334         }
1335
1336         /* UDP Fix */
1337         val64 = 0;
1338         for (i = 0; i < 8; i++)
1339                 writeq(val64, &bar0->rts_frm_len_n[i]);
1340
1341         /* Set the default rts frame length for the rings configured */
1342         val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1343         for (i = 0 ; i < config->rx_ring_num ; i++)
1344                 writeq(val64, &bar0->rts_frm_len_n[i]);
1345
1346         /* Set the frame length for the configured rings
1347          * desired by the user
1348          */
1349         for (i = 0; i < config->rx_ring_num; i++) {
1350                 /* If rts_frm_len[i] == 0 then it is assumed that user not
1351                  * specified frame length steering.
1352                  * If the user provides the frame length then program
1353                  * the rts_frm_len register for those values or else
1354                  * leave it as it is.
1355                  */
1356                 if (rts_frm_len[i] != 0) {
1357                         writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1358                                 &bar0->rts_frm_len_n[i]);
1359                 }
1360         }
1361
1362         /* Program statistics memory */
1363         writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1364
1365         if (nic->device_type == XFRAME_II_DEVICE) {
1366                 val64 = STAT_BC(0x320);
1367                 writeq(val64, &bar0->stat_byte_cnt);
1368         }
1369
1370         /*
1371          * Initializing the sampling rate for the device to calculate the
1372          * bandwidth utilization.
1373          */
1374         val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1375             MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1376         writeq(val64, &bar0->mac_link_util);
1377
1378
1379         /*
1380          * Initializing the Transmit and Receive Traffic Interrupt
1381          * Scheme.
1382          */
1383         /*
1384          * TTI Initialization. Default Tx timer gets us about
1385          * 250 interrupts per sec. Continuous interrupts are enabled
1386          * by default.
1387          */
1388         if (nic->device_type == XFRAME_II_DEVICE) {
1389                 int count = (nic->config.bus_speed * 125)/2;
1390                 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1391         } else {
1392
1393                 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1394         }
1395         val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1396             TTI_DATA1_MEM_TX_URNG_B(0x10) |
1397             TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1398                 if (use_continuous_tx_intrs)
1399                         val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1400         writeq(val64, &bar0->tti_data1_mem);
1401
1402         val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1403             TTI_DATA2_MEM_TX_UFC_B(0x20) |
1404             TTI_DATA2_MEM_TX_UFC_C(0x70) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1405         writeq(val64, &bar0->tti_data2_mem);
1406
1407         val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1408         writeq(val64, &bar0->tti_command_mem);
1409
1410         /*
1411          * Once the operation completes, the Strobe bit of the command
1412          * register will be reset. We poll for this particular condition
1413          * We wait for a maximum of 500ms for the operation to complete,
1414          * if it's not complete by then we return error.
1415          */
1416         time = 0;
1417         while (TRUE) {
1418                 val64 = readq(&bar0->tti_command_mem);
1419                 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1420                         break;
1421                 }
1422                 if (time > 10) {
1423                         DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1424                                   dev->name);
1425                         return -1;
1426                 }
1427                 msleep(50);
1428                 time++;
1429         }
1430
1431         if (nic->config.bimodal) {
1432                 int k = 0;
1433                 for (k = 0; k < config->rx_ring_num; k++) {
1434                         val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1435                         val64 |= TTI_CMD_MEM_OFFSET(0x38+k);
1436                         writeq(val64, &bar0->tti_command_mem);
1437
1438                 /*
1439                  * Once the operation completes, the Strobe bit of the command
1440                  * register will be reset. We poll for this particular condition
1441                  * We wait for a maximum of 500ms for the operation to complete,
1442                  * if it's not complete by then we return error.
1443                 */
1444                         time = 0;
1445                         while (TRUE) {
1446                                 val64 = readq(&bar0->tti_command_mem);
1447                                 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1448                                         break;
1449                                 }
1450                                 if (time > 10) {
1451                                         DBG_PRINT(ERR_DBG,
1452                                                 "%s: TTI init Failed\n",
1453                                         dev->name);
1454                                         return -1;
1455                                 }
1456                                 time++;
1457                                 msleep(50);
1458                         }
1459                 }
1460         } else {
1461
1462                 /* RTI Initialization */
1463                 if (nic->device_type == XFRAME_II_DEVICE) {
1464                         /*
1465                          * Programmed to generate Apprx 500 Intrs per
1466                          * second
1467                          */
1468                         int count = (nic->config.bus_speed * 125)/4;
1469                         val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1470                 } else {
1471                         val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1472                 }
1473                 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1474                     RTI_DATA1_MEM_RX_URNG_B(0x10) |
1475                     RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1476
1477                 writeq(val64, &bar0->rti_data1_mem);
1478
1479                 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1480                     RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1481                 if (nic->intr_type == MSI_X)
1482                     val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1483                                 RTI_DATA2_MEM_RX_UFC_D(0x40));
1484                 else
1485                     val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1486                                 RTI_DATA2_MEM_RX_UFC_D(0x80));
1487                 writeq(val64, &bar0->rti_data2_mem);
1488
1489                 for (i = 0; i < config->rx_ring_num; i++) {
1490                         val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1491                                         | RTI_CMD_MEM_OFFSET(i);
1492                         writeq(val64, &bar0->rti_command_mem);
1493
1494                         /*
1495                          * Once the operation completes, the Strobe bit of the
1496                          * command register will be reset. We poll for this
1497                          * particular condition. We wait for a maximum of 500ms
1498                          * for the operation to complete, if it's not complete
1499                          * by then we return error.
1500                          */
1501                         time = 0;
1502                         while (TRUE) {
1503                                 val64 = readq(&bar0->rti_command_mem);
1504                                 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1505                                         break;
1506                                 }
1507                                 if (time > 10) {
1508                                         DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1509                                                   dev->name);
1510                                         return -1;
1511                                 }
1512                                 time++;
1513                                 msleep(50);
1514                         }
1515                 }
1516         }
1517
1518         /*
1519          * Initializing proper values as Pause threshold into all
1520          * the 8 Queues on Rx side.
1521          */
1522         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1523         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1524
1525         /* Disable RMAC PAD STRIPPING */
1526         add = &bar0->mac_cfg;
1527         val64 = readq(&bar0->mac_cfg);
1528         val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1529         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1530         writel((u32) (val64), add);
1531         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1532         writel((u32) (val64 >> 32), (add + 4));
1533         val64 = readq(&bar0->mac_cfg);
1534
1535         /* Enable FCS stripping by adapter */
1536         add = &bar0->mac_cfg;
1537         val64 = readq(&bar0->mac_cfg);
1538         val64 |= MAC_CFG_RMAC_STRIP_FCS;
1539         if (nic->device_type == XFRAME_II_DEVICE)
1540                 writeq(val64, &bar0->mac_cfg);
1541         else {
1542                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1543                 writel((u32) (val64), add);
1544                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1545                 writel((u32) (val64 >> 32), (add + 4));
1546         }
1547
1548         /*
1549          * Set the time value to be inserted in the pause frame
1550          * generated by xena.
1551          */
1552         val64 = readq(&bar0->rmac_pause_cfg);
1553         val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1554         val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1555         writeq(val64, &bar0->rmac_pause_cfg);
1556
1557         /*
1558          * Set the Threshold Limit for Generating the pause frame
1559          * If the amount of data in any Queue exceeds ratio of
1560          * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1561          * pause frame is generated
1562          */
1563         val64 = 0;
1564         for (i = 0; i < 4; i++) {
1565                 val64 |=
1566                     (((u64) 0xFF00 | nic->mac_control.
1567                       mc_pause_threshold_q0q3)
1568                      << (i * 2 * 8));
1569         }
1570         writeq(val64, &bar0->mc_pause_thresh_q0q3);
1571
1572         val64 = 0;
1573         for (i = 0; i < 4; i++) {
1574                 val64 |=
1575                     (((u64) 0xFF00 | nic->mac_control.
1576                       mc_pause_threshold_q4q7)
1577                      << (i * 2 * 8));
1578         }
1579         writeq(val64, &bar0->mc_pause_thresh_q4q7);
1580
1581         /*
1582          * TxDMA will stop Read request if the number of read split has
1583          * exceeded the limit pointed by shared_splits
1584          */
1585         val64 = readq(&bar0->pic_control);
1586         val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1587         writeq(val64, &bar0->pic_control);
1588
1589         if (nic->config.bus_speed == 266) {
1590                 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1591                 writeq(0x0, &bar0->read_retry_delay);
1592                 writeq(0x0, &bar0->write_retry_delay);
1593         }
1594
1595         /*
1596          * Programming the Herc to split every write transaction
1597          * that does not start on an ADB to reduce disconnects.
1598          */
1599         if (nic->device_type == XFRAME_II_DEVICE) {
1600                 val64 = EXT_REQ_EN | MISC_LINK_STABILITY_PRD(3);
1601                 writeq(val64, &bar0->misc_control);
1602                 val64 = readq(&bar0->pic_control2);
1603                 val64 &= ~(BIT(13)|BIT(14)|BIT(15));
1604                 writeq(val64, &bar0->pic_control2);
1605         }
1606         if (strstr(nic->product_name, "CX4")) {
1607                 val64 = TMAC_AVG_IPG(0x17);
1608                 writeq(val64, &bar0->tmac_avg_ipg);
1609         }
1610
1611         return SUCCESS;
1612 }
1613 #define LINK_UP_DOWN_INTERRUPT          1
1614 #define MAC_RMAC_ERR_TIMER              2
1615
1616 static int s2io_link_fault_indication(nic_t *nic)
1617 {
1618         if (nic->intr_type != INTA)
1619                 return MAC_RMAC_ERR_TIMER;
1620         if (nic->device_type == XFRAME_II_DEVICE)
1621                 return LINK_UP_DOWN_INTERRUPT;
1622         else
1623                 return MAC_RMAC_ERR_TIMER;
1624 }
1625
1626 /**
1627  *  en_dis_able_nic_intrs - Enable or Disable the interrupts
1628  *  @nic: device private variable,
1629  *  @mask: A mask indicating which Intr block must be modified and,
1630  *  @flag: A flag indicating whether to enable or disable the Intrs.
1631  *  Description: This function will either disable or enable the interrupts
1632  *  depending on the flag argument. The mask argument can be used to
1633  *  enable/disable any Intr block.
1634  *  Return Value: NONE.
1635  */
1636
static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
{
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	register u64 val64 = 0, temp64 = 0;

	/*
	 * For each interrupt class selected in 'mask', the class bit is
	 * cleared in general_int_mask (enabling it) when flag is
	 * ENABLE_INTRS, or set (masking it) when flag is DISABLE_INTRS.
	 * Any other 'flag' value leaves the hardware untouched.
	 */

	/*  Top level interrupt classification */
	/*  PIC Interrupts */
	if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
		/*  Enable PIC Intrs in the general intr mask register */
		val64 = TXPIC_INT_M | PIC_RX_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * If Hercules adapter enable GPIO otherwise
			 * disabled all PCIX, Flash, MDIO, IIC and GPIO
			 * interrupts for now.
			 * TODO
			 */
			if (s2io_link_fault_indication(nic) ==
					LINK_UP_DOWN_INTERRUPT ) {
				/* Unmask only the link-state GPIO interrupt */
				temp64 = readq(&bar0->pic_int_mask);
				temp64 &= ~((u64) PIC_INT_GPIO);
				writeq(temp64, &bar0->pic_int_mask);
				temp64 = readq(&bar0->gpio_int_mask);
				temp64 &= ~((u64) GPIO_INT_MASK_LINK_UP);
				writeq(temp64, &bar0->gpio_int_mask);
			} else {
				writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
			}
			/*
			 * No MSI Support is available presently, so TTI and
			 * RTI interrupts are also disabled.
			 */
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable PIC Intrs in the general
			 * intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/*  DMA Interrupts */
	/*  Enabling/Disabling Tx DMA interrupts */
	if (mask & TX_DMA_INTR) {
		/* Enable TxDMA Intrs in the general intr mask register */
		val64 = TXDMA_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * Keep all interrupts other than PFC interrupt
			 * and PCC interrupt disabled in DMA level.
			 */
			val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
						      TXDMA_PCC_INT_M);
			writeq(val64, &bar0->txdma_int_mask);
			/*
			 * Enable only the MISC error 1 interrupt in PFC block
			 */
			val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
			writeq(val64, &bar0->pfc_err_mask);
			/*
			 * Enable only the FB_ECC error interrupt in PCC block
			 */
			val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
			writeq(val64, &bar0->pcc_err_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable TxDMA Intrs in the general intr mask
			 * register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
			writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/*  Enabling/Disabling Rx DMA interrupts */
	if (mask & RX_DMA_INTR) {
		/*  Enable RxDMA Intrs in the general intr mask register */
		val64 = RXDMA_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * All RxDMA block interrupts are disabled for now
			 * TODO
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable RxDMA Intrs in the general intr mask
			 * register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/*  MAC Interrupts */
	/*  Enabling/Disabling MAC interrupts */
	if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
		val64 = TXMAC_INT_M | RXMAC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * All MAC block error interrupts are disabled for now
			 * TODO
			 */
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable MAC Intrs in the general intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
			writeq(DISABLE_ALL_INTRS,
			       &bar0->mac_rmac_err_mask);

			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/*  XGXS Interrupts */
	if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
		val64 = TXXGXS_INT_M | RXXGXS_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * All XGXS block error interrupts are disabled for now
			 * TODO
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable XGXS Intrs in the general intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/*  Memory Controller(MC) interrupts */
	if (mask & MC_INTR) {
		val64 = MC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * Enable all MC Intrs.
			 */
			writeq(0x0, &bar0->mc_int_mask);
			writeq(0x0, &bar0->mc_err_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable MC Intrs in the general intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}


	/*  Tx traffic interrupts */
	if (mask & TX_TRAFFIC_INTR) {
		val64 = TXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * Enable all the Tx side interrupts
			 * writing 0 Enables all 64 TX interrupt levels
			 */
			writeq(0x0, &bar0->tx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Tx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/*  Rx traffic interrupts */
	if (mask & RX_TRAFFIC_INTR) {
		val64 = RXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/* writing 0 Enables all 8 RX interrupt levels */
			writeq(0x0, &bar0->rx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Rx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}
}
1866
1867 static int check_prc_pcc_state(u64 val64, int flag, int rev_id, int herc)
1868 {
1869         int ret = 0;
1870
1871         if (flag == FALSE) {
1872                 if ((!herc && (rev_id >= 4)) || herc) {
1873                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1874                             ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1875                              ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1876                                 ret = 1;
1877                         }
1878                 }else {
1879                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1880                             ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1881                              ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1882                                 ret = 1;
1883                         }
1884                 }
1885         } else {
1886                 if ((!herc && (rev_id >= 4)) || herc) {
1887                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1888                              ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1889                             (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1890                              ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1891                               ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1892                                 ret = 1;
1893                         }
1894                 } else {
1895                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1896                              ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1897                             (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1898                              ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1899                               ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1900                                 ret = 1;
1901                         }
1902                 }
1903         }
1904
1905         return ret;
1906 }
/**
 *  verify_xena_quiescence - Checks whether the H/W is ready
 *  @sp: Pointer to device private structure.
 *  @val64: Value read from adapter status register.
 *  @flag: Indicates if the adapter enable bit was ever written once
 *  before.
 *  Description: Returns whether the H/W is ready to go or not. Depending
 *  on whether adapter enable bit was written or not the comparison
 *  differs and the calling function passes the input argument flag to
 *  indicate this.
 *  Return: 1 If xena is quiescent
 *          0 If Xena is not quiescent
 */
1919
1920 static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag)
1921 {
1922         int ret = 0, herc;
1923         u64 tmp64 = ~((u64) val64);
1924         int rev_id = get_xena_rev_id(sp->pdev);
1925
1926         herc = (sp->device_type == XFRAME_II_DEVICE);
1927         if (!
1928             (tmp64 &
1929              (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
1930               ADAPTER_STATUS_PFC_READY | ADAPTER_STATUS_TMAC_BUF_EMPTY |
1931               ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
1932               ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
1933               ADAPTER_STATUS_P_PLL_LOCK))) {
1934                 ret = check_prc_pcc_state(val64, flag, rev_id, herc);
1935         }
1936
1937         return ret;
1938 }
1939
1940 /**
1941  * fix_mac_address -  Fix for Mac addr problem on Alpha platforms
 * @sp: Pointer to device specific structure
1943  * Description :
1944  * New procedure to clear mac address reading  problems on Alpha platforms
1945  *
1946  */
1947
1948 static void fix_mac_address(nic_t * sp)
1949 {
1950         XENA_dev_config_t __iomem *bar0 = sp->bar0;
1951         u64 val64;
1952         int i = 0;
1953
1954         while (fix_mac[i] != END_SIGN) {
1955                 writeq(fix_mac[i++], &bar0->gpio_control);
1956                 udelay(10);
1957                 val64 = readq(&bar0->gpio_control);
1958         }
1959 }
1960
1961 /**
1962  *  start_nic - Turns the device on
1963  *  @nic : device private variable.
1964  *  Description:
1965  *  This function actually turns the device on. Before this  function is
1966  *  called,all Registers are configured from their reset states
1967  *  and shared memory is allocated but the NIC is still quiescent. On
1968  *  calling this function, the device interrupts are cleared and the NIC is
1969  *  literally switched on by writing into the adapter control register.
1970  *  Return Value:
1971  *  SUCCESS on success and -1 on failure.
1972  */
1973
static int start_nic(struct s2io_nic *nic)
{
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	u16 interruptible;
	u16 subid, i;
	mac_info_t *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/*  PRC Initialization and configuration */
	for (i = 0; i < config->rx_ring_num; i++) {
		/* Point each PRC at the first Rx block of its ring. */
		writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
		       &bar0->prc_rxd0_n[i]);

		val64 = readq(&bar0->prc_ctrl_n[i]);
		if (nic->config.bimodal)
			val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
		if (nic->rxd_mode == RXD_MODE_1)
			val64 |= PRC_CTRL_RC_ENABLED;
		else
			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
		if (nic->device_type == XFRAME_II_DEVICE)
			val64 |= PRC_CTRL_GROUP_READS;
		/* Replace the backoff interval field with 0x1000. */
		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
		writeq(val64, &bar0->prc_ctrl_n[i]);
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
		writeq(val64, &bar0->rx_pa_cfg);
	}

	/*
	 * Enabling MC-RLDRAM. After enabling the device, we timeout
	 * for around 100ms, which is approximately the time required
	 * for the device to be ready for operation.
	 */
	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
	val64 = readq(&bar0->mc_rldram_mrs);	/* read back to flush */

	msleep(100);	/* Delay by around 100 ms. */

	/*
	 * Enabling ECC Protection.
	 * NOTE(review): the code below CLEARS ADAPTER_ECC_EN — confirm
	 * the polarity of this bit against the adapter documentation.
	 */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/*
	 * Clearing any possible Link state change interrupts that
	 * could have popped up just before Enabling the card.
	 * (Presumably write-1-to-clear: set bits are written back.)
	 */
	val64 = readq(&bar0->mac_rmac_err_reg);
	if (val64)
		writeq(val64, &bar0->mac_rmac_err_reg);

	/*
	 * Verify if the device is ready to be enabled, if so enable
	 * it.
	 */
	val64 = readq(&bar0->adapter_status);
	if (!verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
		DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
		DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
			  (unsigned long long) val64);
		return FAILURE;
	}

	/*  Enable select interrupts */
	if (nic->intr_type != INTA)
		en_dis_able_nic_intrs(nic, ENA_ALL_INTRS, DISABLE_INTRS);
	else {
		interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
		interruptible |= TX_PIC_INTR | RX_PIC_INTR;
		interruptible |= TX_MAC_INTR | RX_MAC_INTR;
		en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
	}

	/*
	 * With some switches, link might be already up at this point.
	 * Because of this weird behavior, when we enable laser,
	 * we may not get link. We need to handle this. We cannot
	 * figure out which switch is misbehaving. So we are forced to
	 * make a global change.
	 */

	/* Enabling Laser. */
	val64 = readq(&bar0->adapter_control);
	val64 |= ADAPTER_EOI_TX_ON;
	writeq(val64, &bar0->adapter_control);

	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Dont see link state interrupts initally on some switches,
		 * so directly scheduling the link state task here.
		 */
		schedule_work(&nic->set_link_task);
	}
	/* SXE-002: Initialize link and activity LED */
	subid = nic->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (nic->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		/* LED init sequence at raw BAR0 offset 0x2700. */
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	return SUCCESS;
}
2093 /**
2094  * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2095  */
2096 static struct sk_buff *s2io_txdl_getskb(fifo_info_t *fifo_data, TxD_t *txdlp, int get_off)
2097 {
2098         nic_t *nic = fifo_data->nic;
2099         struct sk_buff *skb;
2100         TxD_t *txds;
2101         u16 j, frg_cnt;
2102
2103         txds = txdlp;
2104         if (txds->Host_Control == (u64)(long)nic->ufo_in_band_v) {
2105                 pci_unmap_single(nic->pdev, (dma_addr_t)
2106                         txds->Buffer_Pointer, sizeof(u64),
2107                         PCI_DMA_TODEVICE);
2108                 txds++;
2109         }
2110
2111         skb = (struct sk_buff *) ((unsigned long)
2112                         txds->Host_Control);
2113         if (!skb) {
2114                 memset(txdlp, 0, (sizeof(TxD_t) * fifo_data->max_txds));
2115                 return NULL;
2116         }
2117         pci_unmap_single(nic->pdev, (dma_addr_t)
2118                          txds->Buffer_Pointer,
2119                          skb->len - skb->data_len,
2120                          PCI_DMA_TODEVICE);
2121         frg_cnt = skb_shinfo(skb)->nr_frags;
2122         if (frg_cnt) {
2123                 txds++;
2124                 for (j = 0; j < frg_cnt; j++, txds++) {
2125                         skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2126                         if (!txds->Buffer_Pointer)
2127                                 break;
2128                         pci_unmap_page(nic->pdev, (dma_addr_t) 
2129                                         txds->Buffer_Pointer,
2130                                        frag->size, PCI_DMA_TODEVICE);
2131                 }
2132         }
2133         txdlp->Host_Control = 0;
2134         return(skb);
2135 }
2136
2137 /**
2138  *  free_tx_buffers - Free all queued Tx buffers
2139  *  @nic : device private variable.
2140  *  Description:
2141  *  Free all queued Tx buffers.
2142  *  Return Value: void
2143 */
2144
2145 static void free_tx_buffers(struct s2io_nic *nic)
2146 {
2147         struct net_device *dev = nic->dev;
2148         struct sk_buff *skb;
2149         TxD_t *txdp;
2150         int i, j;
2151         mac_info_t *mac_control;
2152         struct config_param *config;
2153         int cnt = 0;
2154
2155         mac_control = &nic->mac_control;
2156         config = &nic->config;
2157
2158         for (i = 0; i < config->tx_fifo_num; i++) {
2159                 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
2160                         txdp = (TxD_t *) mac_control->fifos[i].list_info[j].
2161                             list_virt_addr;
2162                         skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2163                         if (skb) {
2164                                 dev_kfree_skb(skb);
2165                                 cnt++;
2166                         }
2167                 }
2168                 DBG_PRINT(INTR_DBG,
2169                           "%s:forcibly freeing %d skbs on FIFO%d\n",
2170                           dev->name, cnt, i);
2171                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2172                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2173         }
2174 }
2175
2176 /**
2177  *   stop_nic -  To stop the nic
 *   @nic : device private variable.
2179  *   Description:
2180  *   This function does exactly the opposite of what the start_nic()
2181  *   function does. This function is called to stop the device.
2182  *   Return Value:
2183  *   void.
2184  */
2185
2186 static void stop_nic(struct s2io_nic *nic)
2187 {
2188         XENA_dev_config_t __iomem *bar0 = nic->bar0;
2189         register u64 val64 = 0;
2190         u16 interruptible;
2191         mac_info_t *mac_control;
2192         struct config_param *config;
2193
2194         mac_control = &nic->mac_control;
2195         config = &nic->config;
2196
2197         /*  Disable all interrupts */
2198         interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2199         interruptible |= TX_PIC_INTR | RX_PIC_INTR;
2200         interruptible |= TX_MAC_INTR | RX_MAC_INTR;
2201         en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2202
2203         /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2204         val64 = readq(&bar0->adapter_control);
2205         val64 &= ~(ADAPTER_CNTL_EN);
2206         writeq(val64, &bar0->adapter_control);
2207 }
2208
static int fill_rxd_3buf(nic_t *nic, RxD_t *rxdp, struct sk_buff *skb)
{
	struct net_device *dev = nic->dev;
	struct sk_buff *frag_list;
	void *tmp;

	/* Buffer-1 receives L3/L4 headers */
	((RxD3_t*)rxdp)->Buffer1_ptr = pci_map_single
			(nic->pdev, skb->data, l3l4hdr_size + 4,
			PCI_DMA_FROMDEVICE);

	/* skb_shinfo(skb)->frag_list will have L4 data payload */
	skb_shinfo(skb)->frag_list = dev_alloc_skb(dev->mtu + ALIGN_SIZE);
	if (skb_shinfo(skb)->frag_list == NULL) {
		DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n ", dev->name);
		/* NOTE(review): Buffer1_ptr mapped above is not unmapped on
		 * this failure path — looks like a DMA mapping leak; confirm
		 * against the caller's cleanup before changing. */
		return -ENOMEM ;
	}
	frag_list = skb_shinfo(skb)->frag_list;
	frag_list->next = NULL;
	/* Align the payload start of the frag skb to the next
	 * (ALIGN_SIZE + 1)-byte boundary as the hardware expects. */
	tmp = (void *)ALIGN((long)frag_list->data, ALIGN_SIZE + 1);
	frag_list->data = tmp;
	frag_list->tail = tmp;

	/* Buffer-2 receives L4 data payload */
	((RxD3_t*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev,
				frag_list->data, dev->mtu,
				PCI_DMA_FROMDEVICE);
	/* Advertise both buffer sizes to the adapter in Control_2. */
	rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
	rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);

	return SUCCESS;
}
2241
2242 /**
2243  *  fill_rx_buffers - Allocates the Rx side skbs
2244  *  @nic:  device private variable
2245  *  @ring_no: ring number
2246  *  Description:
2247  *  The function allocates Rx side skbs and puts the physical
2248  *  address of these buffers into the RxD buffer pointers, so that the NIC
2249  *  can DMA the received frame into these locations.
2250  *  The NIC supports 3 receive modes, viz
2251  *  1. single buffer,
2252  *  2. three buffer and
2253  *  3. Five buffer modes.
2254  *  Each mode defines how many fragments the received frame will be split
2255  *  up into by the NIC. The frame is split into L3 header, L4 Header,
2256  *  L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2257  *  is split into 3 fragments. As of now only single buffer mode is
2258  *  supported.
2259  *   Return Value:
2260  *  SUCCESS on success or an appropriate -ve value on failure.
2261  */
2262
static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
{
	struct net_device *dev = nic->dev;
	struct sk_buff *skb;
	RxD_t *rxdp;
	int off, off1, size, block_no, block_no1;
	u32 alloc_tab = 0;
	u32 alloc_cnt;
	mac_info_t *mac_control;
	struct config_param *config;
	u64 tmp;
	buffAdd_t *ba;
#ifndef CONFIG_S2IO_NAPI
	unsigned long flags;
#endif
	RxD_t *first_rxdp = NULL;

	mac_control = &nic->mac_control;
	config = &nic->config;
	/* Number of descriptors on this ring that still need a buffer. */
	alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
	    atomic_read(&nic->rx_bufs_left[ring_no]);

	/* Snapshot the consumer (get) position so we can detect catching
	 * up with it while producing below. */
	block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
	off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
	while (alloc_tab < alloc_cnt) {
		block_no = mac_control->rings[ring_no].rx_curr_put_info.
		    block_index;
		off = mac_control->rings[ring_no].rx_curr_put_info.offset;

		rxdp = mac_control->rings[ring_no].
				rx_blocks[block_no].rxds[off].virt_addr;

		/* Producer has wrapped around to the consumer and the
		 * descriptor still holds an skb: the ring is full. */
		if ((block_no == block_no1) && (off == off1) &&
					(rxdp->Host_Control)) {
			DBG_PRINT(INTR_DBG, "%s: Get and Put",
				  dev->name);
			DBG_PRINT(INTR_DBG, " info equated\n");
			goto end;
		}
		/* End of the current block reached: advance (and possibly
		 * wrap) the put block index, then reset the offset. */
		if (off && (off == rxd_count[nic->rxd_mode])) {
			mac_control->rings[ring_no].rx_curr_put_info.
			    block_index++;
			if (mac_control->rings[ring_no].rx_curr_put_info.
			    block_index == mac_control->rings[ring_no].
					block_count)
				mac_control->rings[ring_no].rx_curr_put_info.
					block_index = 0;
			block_no = mac_control->rings[ring_no].
					rx_curr_put_info.block_index;
			if (off == rxd_count[nic->rxd_mode])
				off = 0;
			mac_control->rings[ring_no].rx_curr_put_info.
				offset = off;
			rxdp = mac_control->rings[ring_no].
				rx_blocks[block_no].block_virt_addr;
			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
				  dev->name, rxdp);
		}
#ifndef CONFIG_S2IO_NAPI
		/* Publish the absolute put position for the (non-NAPI)
		 * Rx interrupt handler, under put_lock. */
		spin_lock_irqsave(&nic->put_lock, flags);
		mac_control->rings[ring_no].put_pos =
		    (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
		spin_unlock_irqrestore(&nic->put_lock, flags);
#endif
		/* Descriptor still owned by the adapter (and, in 2/3-buffer
		 * modes, already filled): stop producing here. */
		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
			((nic->rxd_mode >= RXD_MODE_3A) &&
				(rxdp->Control_2 & BIT(0)))) {
			mac_control->rings[ring_no].rx_curr_put_info.
					offset = off;
			goto end;
		}
		/* calculate size of skb based on ring mode */
		size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
				HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
		if (nic->rxd_mode == RXD_MODE_1)
			size += NET_IP_ALIGN;
		else if (nic->rxd_mode == RXD_MODE_3B)
			size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
		else
			size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;

		/* allocate skb */
		skb = dev_alloc_skb(size);
		if(!skb) {
			DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
			DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
			/* Hand any batched descriptors to the adapter
			 * before bailing out (barrier first, see 'end:'). */
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			return -ENOMEM ;
		}
		if (nic->rxd_mode == RXD_MODE_1) {
			/* 1 buffer mode - normal operation mode */
			memset(rxdp, 0, sizeof(RxD1_t));
			skb_reserve(skb, NET_IP_ALIGN);
			((RxD1_t*)rxdp)->Buffer0_ptr = pci_map_single
			    (nic->pdev, skb->data, size - NET_IP_ALIGN,
				PCI_DMA_FROMDEVICE);
			rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);

		} else if (nic->rxd_mode >= RXD_MODE_3A) {
			/*
			 * 2 or 3 buffer mode -
			 * Both 2 buffer mode and 3 buffer mode provides 128
			 * byte aligned receive buffers.
			 *
			 * 3 buffer mode provides header separation where in
			 * skb->data will have L3/L4 headers where as
			 * skb_shinfo(skb)->frag_list will have the L4 data
			 * payload
			 */

			memset(rxdp, 0, sizeof(RxD3_t));
			ba = &mac_control->rings[ring_no].ba[block_no][off];
			skb_reserve(skb, BUF0_LEN);
			/* Round skb->data up to the next ALIGN_SIZE boundary
			 * (ALIGN_SIZE is assumed to be 2^n - 1 here). */
			tmp = (u64)(unsigned long) skb->data;
			tmp += ALIGN_SIZE;
			tmp &= ~ALIGN_SIZE;
			skb->data = (void *) (unsigned long)tmp;
			skb->tail = (void *) (unsigned long)tmp;

			/* Buffer-0 is a small pre-allocated staging area. */
			((RxD3_t*)rxdp)->Buffer0_ptr =
			    pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
					   PCI_DMA_FROMDEVICE);
			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
			if (nic->rxd_mode == RXD_MODE_3B) {
				/* Two buffer mode */

				/*
				 * Buffer2 will have L3/L4 header plus 
				 * L4 payload
				 */
				((RxD3_t*)rxdp)->Buffer2_ptr = pci_map_single
				(nic->pdev, skb->data, dev->mtu + 4,
						PCI_DMA_FROMDEVICE);

				/* Buffer-1 will be dummy buffer not used */
				((RxD3_t*)rxdp)->Buffer1_ptr =
				pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
					PCI_DMA_FROMDEVICE);
				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
								(dev->mtu + 4);
			} else {
				/* 3 buffer mode */
				if (fill_rxd_3buf(nic, rxdp, skb) == -ENOMEM) {
					dev_kfree_skb_irq(skb);
					if (first_rxdp) {
						wmb();
						first_rxdp->Control_1 |=
							RXD_OWN_XENA;
					}
					return -ENOMEM ;
				}
			}
			/* BIT(0) in Control_2 marks this RxD as filled. */
			rxdp->Control_2 |= BIT(0);
		}
		rxdp->Host_Control = (unsigned long) (skb);
		/* Descriptors within a sync batch are handed to the adapter
		 * immediately; batch heads are deferred (see below). */
		if (alloc_tab & ((1 << rxsync_frequency) - 1))
			rxdp->Control_1 |= RXD_OWN_XENA;
		off++;
		if (off == (rxd_count[nic->rxd_mode] + 1))
			off = 0;
		mac_control->rings[ring_no].rx_curr_put_info.offset = off;

		rxdp->Control_2 |= SET_RXD_MARKER;
		/* Every 2^rxsync_frequency descriptors: release the previous
		 * batch head (after a barrier) and start a new batch. */
		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			first_rxdp = rxdp;
		}
		atomic_inc(&nic->rx_bufs_left[ring_no]);
		alloc_tab++;
	}

      end:
	/* Transfer ownership of first descriptor to adapter just before
	 * exiting. Before that, use memory barrier so that ownership
	 * and other fields are seen by adapter correctly.
	 */
	if (first_rxdp) {
		wmb();
		first_rxdp->Control_1 |= RXD_OWN_XENA;
	}

	return SUCCESS;
}
2453
static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
{
	struct net_device *dev = sp->dev;
	int j;
	struct sk_buff *skb;
	RxD_t *rxdp;
	mac_info_t *mac_control;
	buffAdd_t *ba;

	mac_control = &sp->mac_control;
	/* Walk every descriptor of the block; unmap and free any skb that
	 * is still attached.  The unmap sequence depends on the Rx mode
	 * (1-buffer, 2-buffer/3B, or 3-buffer). */
	for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
		rxdp = mac_control->rings[ring_no].
				rx_blocks[blk].rxds[j].virt_addr;
		skb = (struct sk_buff *)
			((unsigned long) rxdp->Host_Control);
		if (!skb) {
			continue;
		}
		if (sp->rxd_mode == RXD_MODE_1) {
			/* Single buffer: one mapping covering MTU + all
			 * possible link-layer headers. */
			pci_unmap_single(sp->pdev, (dma_addr_t)
				 ((RxD1_t*)rxdp)->Buffer0_ptr,
				 dev->mtu +
				 HEADER_ETHERNET_II_802_3_SIZE
				 + HEADER_802_2_SIZE +
				 HEADER_SNAP_SIZE,
				 PCI_DMA_FROMDEVICE);
			memset(rxdp, 0, sizeof(RxD1_t));
		} else if(sp->rxd_mode == RXD_MODE_3B) {
			/* 2-buffer mode: staging buffer 0, dummy buffer 1,
			 * and buffer 2 holding header + payload. */
			ba = &mac_control->rings[ring_no].
				ba[blk][j];
			pci_unmap_single(sp->pdev, (dma_addr_t)
				 ((RxD3_t*)rxdp)->Buffer0_ptr,
				 BUF0_LEN,
				 PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev, (dma_addr_t)
				 ((RxD3_t*)rxdp)->Buffer1_ptr,
				 BUF1_LEN,
				 PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev, (dma_addr_t)
				 ((RxD3_t*)rxdp)->Buffer2_ptr,
				 dev->mtu + 4,
				 PCI_DMA_FROMDEVICE);
			memset(rxdp, 0, sizeof(RxD3_t));
		} else {
			/* 3-buffer mode: buffer 1 holds L3/L4 headers,
			 * buffer 2 the L4 payload (see fill_rxd_3buf). */
			pci_unmap_single(sp->pdev, (dma_addr_t)
				((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN,
				PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev, (dma_addr_t)
				((RxD3_t*)rxdp)->Buffer1_ptr, 
				l3l4hdr_size + 4,
				PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev, (dma_addr_t)
				((RxD3_t*)rxdp)->Buffer2_ptr, dev->mtu,
				PCI_DMA_FROMDEVICE);
			memset(rxdp, 0, sizeof(RxD3_t));
		}
		dev_kfree_skb(skb);
		atomic_dec(&sp->rx_bufs_left[ring_no]);
	}
}
2514
2515 /**
2516  *  free_rx_buffers - Frees all Rx buffers
2517  *  @sp: device private variable.
2518  *  Description:
2519  *  This function will free all Rx buffers allocated by host.
2520  *  Return Value:
2521  *  NONE.
2522  */
2523
2524 static void free_rx_buffers(struct s2io_nic *sp)
2525 {
2526         struct net_device *dev = sp->dev;
2527         int i, blk = 0, buf_cnt = 0;
2528         mac_info_t *mac_control;
2529         struct config_param *config;
2530
2531         mac_control = &sp->mac_control;
2532         config = &sp->config;
2533
2534         for (i = 0; i < config->rx_ring_num; i++) {
2535                 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2536                         free_rxd_blk(sp,i,blk);
2537
2538                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2539                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2540                 mac_control->rings[i].rx_curr_put_info.offset = 0;
2541                 mac_control->rings[i].rx_curr_get_info.offset = 0;
2542                 atomic_set(&sp->rx_bufs_left[i], 0);
2543                 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2544                           dev->name, buf_cnt, i);
2545         }
2546 }
2547
2548 /**
2549  * s2io_poll - Rx interrupt handler for NAPI support
2550  * @dev : pointer to the device structure.
 * @budget : The number of packets that were budgeted to be processed
 * during one pass through the "Poll" function.
 * Description:
 * Comes into picture only if NAPI support has been incorporated. It does
 * the same thing that rx_intr_handler does, but not in an interrupt context;
 * it will also process only a given number of packets.
2557  * Return value:
2558  * 0 on success and 1 if there are No Rx packets to be processed.
2559  */
2560
#if defined(CONFIG_S2IO_NAPI)
/* Replenish every Rx ring, stopping at the first ring that runs out of
 * memory.  Factored out of s2io_poll(), which previously duplicated this
 * loop verbatim on both of its exit paths. */
static void s2io_replenish_rx_rings(nic_t *nic)
{
	struct net_device *dev = nic->dev;
	int i;

	for (i = 0; i < nic->config.rx_ring_num; i++) {
		if (fill_rx_buffers(nic, i) == -ENOMEM) {
			DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
			DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
			break;
		}
	}
}

static int s2io_poll(struct net_device *dev, int *budget)
{
	nic_t *nic = dev->priv;
	int pkt_cnt = 0, org_pkts_to_process;
	mac_info_t *mac_control;
	struct config_param *config;
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
	int i;

	atomic_inc(&nic->isr_cnt);
	mac_control = &nic->mac_control;
	config = &nic->config;

	/* Quota for this pass: lesser of *budget and the device quota. */
	nic->pkts_to_process = *budget;
	if (nic->pkts_to_process > dev->quota)
		nic->pkts_to_process = dev->quota;
	org_pkts_to_process = nic->pkts_to_process;

	/* Acknowledge all pending Rx traffic interrupts (write-1-to-clear),
	 * then read back to flush the PCI write. */
	writeq(val64, &bar0->rx_traffic_int);
	val64 = readl(&bar0->rx_traffic_int);

	for (i = 0; i < config->rx_ring_num; i++) {
		rx_intr_handler(&mac_control->rings[i]);
		pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
		if (!nic->pkts_to_process) {
			/* Quota for the current iteration has been met */
			goto no_rx;
		}
	}
	if (!pkt_cnt)
		pkt_cnt = 1;

	dev->quota -= pkt_cnt;
	*budget -= pkt_cnt;
	netif_rx_complete(dev);

	s2io_replenish_rx_rings(nic);

	/* Re enable the Rx interrupts. */
	writeq(0x0, &bar0->rx_traffic_mask);
	val64 = readl(&bar0->rx_traffic_mask);
	atomic_dec(&nic->isr_cnt);
	return 0;

no_rx:
	/* Quota exhausted: stay on the poll list (return 1) and leave Rx
	 * interrupts masked. */
	dev->quota -= pkt_cnt;
	*budget -= pkt_cnt;

	s2io_replenish_rx_rings(nic);
	atomic_dec(&nic->isr_cnt);
	return 1;
}
#endif
2627
2628 /**
2629  * s2io_netpoll - Rx interrupt service handler for netpoll support
2630  * @dev : pointer to the device structure.
2631  * Description:
2632  * Polling 'interrupt' - used by things like netconsole to send skbs
2633  * without having to re-enable interrupts. It's not called while
2634  * the interrupt routine is executing.
2635  */
2636
#ifdef CONFIG_NET_POLL_CONTROLLER
static void s2io_netpoll(struct net_device *dev)
{
	nic_t *nic = dev->priv;
	mac_info_t *mac_control;
	struct config_param *config;
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	u64 val64;
	int i;

	/* This path runs outside the normal ISR (e.g. from netconsole),
	 * so keep the device IRQ off for the duration. */
	disable_irq(dev->irq);

	atomic_inc(&nic->isr_cnt);
	mac_control = &nic->mac_control;
	config = &nic->config;

	/* Acknowledge whatever Rx traffic interrupts are pending by
	 * writing the read value back (write-1-to-clear). */
	val64 = readq(&bar0->rx_traffic_int);
	writeq(val64, &bar0->rx_traffic_int);

	/* Service all Rx rings, then replenish the buffers consumed. */
	for (i = 0; i < config->rx_ring_num; i++)
		rx_intr_handler(&mac_control->rings[i]);

	for (i = 0; i < config->rx_ring_num; i++) {
		if (fill_rx_buffers(nic, i) == -ENOMEM) {
			DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
			DBG_PRINT(ERR_DBG, " in Rx Netpoll!!\n");
			break;
		}
	}
	atomic_dec(&nic->isr_cnt);
	enable_irq(dev->irq);
	return;
}
#endif
2671
2672 /**
2673  *  rx_intr_handler - Rx interrupt handler
 *  @ring_data: private data of the ring to be serviced.
 *  Description:
 *  If the interrupt is because of a received frame or if the
 *  receive ring contains fresh as yet un-processed frames, this function is
 *  called. It picks out the RxD at which place the last Rx processing had
2679  *  stopped and sends the skb to the OSM's Rx handler and then increments
2680  *  the offset.
2681  *  Return Value:
2682  *  NONE.
2683  */
static void rx_intr_handler(ring_info_t *ring_data)
{
	nic_t *nic = ring_data->nic;
	struct net_device *dev = (struct net_device *) nic->dev;
	int get_block, put_block, put_offset;
	rx_curr_get_info_t get_info, put_info;
	RxD_t *rxdp;
	struct sk_buff *skb;
#ifndef CONFIG_S2IO_NAPI
	int pkt_cnt = 0;
#endif
	int i;

	spin_lock(&nic->rx_lock);
	/* Card is being reset/brought down: nothing to service. */
	if (atomic_read(&nic->card_state) == CARD_DOWN) {
		DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n",
			  __FUNCTION__, dev->name);
		spin_unlock(&nic->rx_lock);
		return;
	}

	/* Snapshot consumer (get) and producer (put) positions. */
	get_info = ring_data->rx_curr_get_info;
	get_block = get_info.block_index;
	put_info = ring_data->rx_curr_put_info;
	put_block = put_info.block_index;
	rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
#ifndef CONFIG_S2IO_NAPI
	/* Non-NAPI: put_pos is published by fill_rx_buffers() under
	 * put_lock, so read it under the same lock. */
	spin_lock(&nic->put_lock);
	put_offset = ring_data->put_pos;
	spin_unlock(&nic->put_lock);
#else
	put_offset = (put_block * (rxd_count[nic->rxd_mode] + 1)) +
		put_info.offset;
#endif
	/* NOTE(review): put_offset appears to be computed but never read
	 * below in this version — confirm before removing. */
	while (RXD_IS_UP2DT(rxdp)) {
		/* If your are next to put index then it's FIFO full condition */
		if ((get_block == put_block) &&
		    (get_info.offset + 1) == put_info.offset) {
			DBG_PRINT(ERR_DBG, "%s: Ring Full\n",dev->name);
			break;
		}
		skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
		if (skb == NULL) {
			DBG_PRINT(ERR_DBG, "%s: The skb is ",
				  dev->name);
			DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
			spin_unlock(&nic->rx_lock);
			return;
		}
		/* Unmap the DMA buffers for this descriptor; the layout
		 * depends on the Rx mode (mirrors fill_rx_buffers). */
		if (nic->rxd_mode == RXD_MODE_1) {
			pci_unmap_single(nic->pdev, (dma_addr_t)
				 ((RxD1_t*)rxdp)->Buffer0_ptr,
				 dev->mtu +
				 HEADER_ETHERNET_II_802_3_SIZE +
				 HEADER_802_2_SIZE +
				 HEADER_SNAP_SIZE,
				 PCI_DMA_FROMDEVICE);
		} else if (nic->rxd_mode == RXD_MODE_3B) {
			pci_unmap_single(nic->pdev, (dma_addr_t)
				 ((RxD3_t*)rxdp)->Buffer0_ptr,
				 BUF0_LEN, PCI_DMA_FROMDEVICE);
			pci_unmap_single(nic->pdev, (dma_addr_t)
				 ((RxD3_t*)rxdp)->Buffer1_ptr,
				 BUF1_LEN, PCI_DMA_FROMDEVICE);
			pci_unmap_single(nic->pdev, (dma_addr_t)
				 ((RxD3_t*)rxdp)->Buffer2_ptr,
				 dev->mtu + 4,
				 PCI_DMA_FROMDEVICE);
		} else {
			pci_unmap_single(nic->pdev, (dma_addr_t)
					 ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN,
					 PCI_DMA_FROMDEVICE);
			pci_unmap_single(nic->pdev, (dma_addr_t)
					 ((RxD3_t*)rxdp)->Buffer1_ptr,
					 l3l4hdr_size + 4,
					 PCI_DMA_FROMDEVICE);
			pci_unmap_single(nic->pdev, (dma_addr_t)
					 ((RxD3_t*)rxdp)->Buffer2_ptr,
					 dev->mtu, PCI_DMA_FROMDEVICE);
		}
		prefetch(skb->data);
		/* Hand the frame to the OS-specific receive handler. */
		rx_osm_handler(ring_data, rxdp);
		get_info.offset++;
		ring_data->rx_curr_get_info.offset = get_info.offset;
		rxdp = ring_data->rx_blocks[get_block].
				rxds[get_info.offset].virt_addr;
		/* End of block: wrap the offset and advance to the next
		 * block (wrapping at block_count). */
		if (get_info.offset == rxd_count[nic->rxd_mode]) {
			get_info.offset = 0;
			ring_data->rx_curr_get_info.offset = get_info.offset;
			get_block++;
			if (get_block == ring_data->block_count)
				get_block = 0;
			ring_data->rx_curr_get_info.block_index = get_block;
			rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
		}

#ifdef CONFIG_S2IO_NAPI
		nic->pkts_to_process -= 1;
		if (!nic->pkts_to_process)
			break;
#else
		pkt_cnt++;
		if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
			break;
#endif
	}
	if (nic->lro) {
		/* Clear all LRO sessions before exiting */
		for (i=0; i<MAX_LRO_SESSIONS; i++) {
			lro_t *lro = &nic->lro0_n[i];
			if (lro->in_use) {
				update_L3L4_header(nic, lro);
				queue_rx_frame(lro->parent);
				clear_lro_session(lro);
			}
		}
	}

	spin_unlock(&nic->rx_lock);
}
2804
2805 /**
2806  *  tx_intr_handler - Transmit interrupt handler
 *  @fifo_data : FIFO private structure of the FIFO to be serviced
2808  *  Description:
2809  *  If an interrupt was raised to indicate DMA complete of the
2810  *  Tx packet, this function is called. It identifies the last TxD
2811  *  whose buffer was freed and frees all skbs whose data have already
2812  *  DMA'ed into the NICs internal memory.
2813  *  Return Value:
2814  *  NONE
2815  */
2816
static void tx_intr_handler(fifo_info_t *fifo_data)
{
	nic_t *nic = fifo_data->nic;
	struct net_device *dev = (struct net_device *) nic->dev;
	tx_curr_get_info_t get_info, put_info;
	struct sk_buff *skb;
	TxD_t *txdlp;

	get_info = fifo_data->tx_curr_get_info;
	put_info = fifo_data->tx_curr_put_info;
	txdlp = (TxD_t *) fifo_data->list_info[get_info.offset].
	    list_virt_addr;
	/* Walk completed TxD lists: stop when a descriptor is still owned
	 * by the adapter, when we catch up with the producer, or when no
	 * skb is attached. */
	while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
	       (get_info.offset != put_info.offset) &&
	       (txdlp->Host_Control)) {
		/* Check for TxD errors */
		if (txdlp->Control_1 & TXD_T_CODE) {
			unsigned long long err;
			err = txdlp->Control_1 & TXD_T_CODE;
			if (err & 0x1) {
				nic->mac_control.stats_info->sw_stat.
						parity_err_cnt++;
			}
			/* T_CODE 0xA means the frame was returned because
			 * the link went down; anything else is an error. */
			if ((err >> 48) == 0xA) {
				DBG_PRINT(TX_DBG, "TxD returned due \
to loss of link\n");
			}
			else {
				DBG_PRINT(ERR_DBG, "***TxD error \
%llx\n", err);
			}
		}

		/* Unmap the list's buffers and recover the skb. */
		skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
		if (skb == NULL) {
			DBG_PRINT(ERR_DBG, "%s: Null skb ",
			__FUNCTION__);
			DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
			return;
		}

		/* Updating the statistics block */
		nic->stats.tx_bytes += skb->len;
		dev_kfree_skb_irq(skb);

		/* Advance the consumer offset, wrapping at fifo_len + 1. */
		get_info.offset++;
		if (get_info.offset == get_info.fifo_len + 1)
			get_info.offset = 0;
		txdlp = (TxD_t *) fifo_data->list_info
		    [get_info.offset].list_virt_addr;
		fifo_data->tx_curr_get_info.offset =
		    get_info.offset;
	}

	/* Descriptors were reclaimed, so the queue can be restarted if it
	 * had been stopped for lack of TxDs. */
	spin_lock(&nic->tx_lock);
	if (netif_queue_stopped(dev))
		netif_wake_queue(dev);
	spin_unlock(&nic->tx_lock);
}
2876
2877 /**
2878  *  s2io_mdio_write - Function to write in to MDIO registers
2879  *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
2880  *  @addr     : address value
2881  *  @value    : data value
2882  *  @dev      : pointer to net_device structure
2883  *  Description:
2884  *  This function is used to write values to the MDIO registers
2885  *  NONE
2886  */
2887 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
2888 {
2889         u64 val64 = 0x0;
2890         nic_t *sp = dev->priv;
2891         XENA_dev_config_t *bar0 = (XENA_dev_config_t *)sp->bar0;
2892
2893         //address transaction
2894         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2895                         | MDIO_MMD_DEV_ADDR(mmd_type)
2896                         | MDIO_MMS_PRT_ADDR(0x0);
2897         writeq(val64, &bar0->mdio_control);
2898         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2899         writeq(val64, &bar0->mdio_control);
2900         udelay(100);
2901
2902         //Data transaction
2903         val64 = 0x0;
2904         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2905                         | MDIO_MMD_DEV_ADDR(mmd_type)
2906                         | MDIO_MMS_PRT_ADDR(0x0)
2907                         | MDIO_MDIO_DATA(value)
2908                         | MDIO_OP(MDIO_OP_WRITE_TRANS);
2909         writeq(val64, &bar0->mdio_control);
2910         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2911         writeq(val64, &bar0->mdio_control);
2912         udelay(100);
2913
2914         val64 = 0x0;
2915         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2916         | MDIO_MMD_DEV_ADDR(mmd_type)
2917         | MDIO_MMS_PRT_ADDR(0x0)
2918         | MDIO_OP(MDIO_OP_READ_TRANS);
2919         writeq(val64, &bar0->mdio_control);
2920         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2921         writeq(val64, &bar0->mdio_control);
2922         udelay(100);
2923
2924 }
2925
2926 /**
2927  *  s2io_mdio_read - Function to read from the MDIO registers
2928  *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
2929  *  @addr     : address value
2930  *  @dev      : pointer to net_device structure
2931  *  Description:
2932  *  This function is used to read values from the MDIO registers
2933  *  NONE
2934  */
2935 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
2936 {
2937         u64 val64 = 0x0;
2938         u64 rval64 = 0x0;
2939         nic_t *sp = dev->priv;
2940         XENA_dev_config_t *bar0 = (XENA_dev_config_t *)sp->bar0;
2941
2942         /* address transaction */
2943         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2944                         | MDIO_MMD_DEV_ADDR(mmd_type)
2945                         | MDIO_MMS_PRT_ADDR(0x0);
2946         writeq(val64, &bar0->mdio_control);
2947         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2948         writeq(val64, &bar0->mdio_control);
2949         udelay(100);
2950
2951         /* Data transaction */
2952         val64 = 0x0;
2953         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2954                         | MDIO_MMD_DEV_ADDR(mmd_type)
2955                         | MDIO_MMS_PRT_ADDR(0x0)
2956                         | MDIO_OP(MDIO_OP_READ_TRANS);
2957         writeq(val64, &bar0->mdio_control);
2958         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2959         writeq(val64, &bar0->mdio_control);
2960         udelay(100);
2961
2962         /* Read the value from regs */
2963         rval64 = readq(&bar0->mdio_control);
2964         rval64 = rval64 & 0xFFFF0000;
2965         rval64 = rval64 >> 16;
2966         return rval64;
2967 }
2968 /**
2969  *  s2io_chk_xpak_counter - Function to check the status of the xpak counters
2970  *  @counter      : counter value to be updated
2971  *  @flag         : flag to indicate the status
2972  *  @type         : counter type
2973  *  Description:
2974  *  This function is to check the status of the xpak counters value
2975  *  NONE
2976  */
2977
2978 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
2979 {
2980         u64 mask = 0x3;
2981         u64 val64;
2982         int i;
2983         for(i = 0; i <index; i++)
2984                 mask = mask << 0x2;
2985
2986         if(flag > 0)
2987         {
2988                 *counter = *counter + 1;
2989                 val64 = *regs_stat & mask;
2990                 val64 = val64 >> (index * 0x2);
2991                 val64 = val64 + 1;
2992                 if(val64 == 3)
2993                 {
2994                         switch(type)
2995                         {
2996                         case 1:
2997                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
2998                                           "service. Excessive temperatures may "
2999                                           "result in premature transceiver "
3000                                           "failure \n");
3001                         break;
3002                         case 2:
3003                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3004                                           "service Excessive bias currents may "
3005                                           "indicate imminent laser diode "
3006                                           "failure \n");
3007                         break;
3008                         case 3:
3009                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3010                                           "service Excessive laser output "
3011                                           "power may saturate far-end "
3012                                           "receiver\n");
3013                         break;
3014                         default:
3015                                 DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
3016                                           "type \n");
3017                         }
3018                         val64 = 0x0;
3019                 }
3020                 val64 = val64 << (index * 0x2);
3021                 *regs_stat = (*regs_stat & (~mask)) | (val64);
3022
3023         } else {
3024                 *regs_stat = *regs_stat & (~mask);
3025         }
3026 }
3027
3028 /**
3029  *  s2io_updt_xpak_counter - Function to update the xpak counters
3030  *  @dev         : pointer to net_device struct
3031  *  Description:
3032  *  This function is to update the status of the xpak counters value
3033  *  NONE
3034  */
3035 static void s2io_updt_xpak_counter(struct net_device *dev)
3036 {
3037         u16 flag  = 0x0;
3038         u16 type  = 0x0;
3039         u16 val16 = 0x0;
3040         u64 val64 = 0x0;
3041         u64 addr  = 0x0;
3042
3043         nic_t *sp = dev->priv;
3044         StatInfo_t *stat_info = sp->mac_control.stats_info;
3045
3046         /* Check the communication with the MDIO slave */
3047         addr = 0x0000;
3048         val64 = 0x0;
3049         val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3050         if((val64 == 0xFFFF) || (val64 == 0x0000))
3051         {
3052                 DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
3053                           "Returned %llx\n", (unsigned long long)val64);
3054                 return;
3055         }
3056
3057         /* Check for the expecte value of 2040 at PMA address 0x0000 */
3058         if(val64 != 0x2040)
3059         {
3060                 DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
3061                 DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
3062                           (unsigned long long)val64);
3063                 return;
3064         }
3065
3066         /* Loading the DOM register to MDIO register */
3067         addr = 0xA100;
3068         s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
3069         val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3070
3071         /* Reading the Alarm flags */
3072         addr = 0xA070;
3073         val64 = 0x0;
3074         val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3075
3076         flag = CHECKBIT(val64, 0x7);
3077         type = 1;
3078         s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
3079                                 &stat_info->xpak_stat.xpak_regs_stat,
3080                                 0x0, flag, type);
3081
3082         if(CHECKBIT(val64, 0x6))
3083                 stat_info->xpak_stat.alarm_transceiver_temp_low++;
3084
3085         flag = CHECKBIT(val64, 0x3);
3086         type = 2;
3087         s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
3088                                 &stat_info->xpak_stat.xpak_regs_stat,
3089                                 0x2, flag, type);
3090
3091         if(CHECKBIT(val64, 0x2))
3092                 stat_info->xpak_stat.alarm_laser_bias_current_low++;
3093
3094         flag = CHECKBIT(val64, 0x1);
3095         type = 3;
3096         s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
3097                                 &stat_info->xpak_stat.xpak_regs_stat,
3098                                 0x4, flag, type);
3099
3100         if(CHECKBIT(val64, 0x0))
3101                 stat_info->xpak_stat.alarm_laser_output_power_low++;
3102
3103         /* Reading the Warning flags */
3104         addr = 0xA074;
3105         val64 = 0x0;
3106         val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3107
3108         if(CHECKBIT(val64, 0x7))
3109                 stat_info->xpak_stat.warn_transceiver_temp_high++;
3110
3111         if(CHECKBIT(val64, 0x6))
3112                 stat_info->xpak_stat.warn_transceiver_temp_low++;