[ETHTOOL] Provide default behaviors for a few ethtool sub-ioctls
[linux-2.6.git] / drivers / net / s2io.c
1 /************************************************************************
2  * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3  * Copyright(c) 2002-2007 Neterion Inc.
4
5  * This software may be used and distributed according to the terms of
6  * the GNU General Public License (GPL), incorporated herein by reference.
7  * Drivers based on or derived from this code fall under the GPL and must
8  * retain the authorship, copyright and license notice.  This file is not
9  * a complete program and may only be used when the entire operating
10  * system is licensed under the GPL.
11  * See the file COPYING in this distribution for more information.
12  *
13  * Credits:
14  * Jeff Garzik          : For pointing out the improper error condition
15  *                        check in the s2io_xmit routine and also some
16  *                        issues in the Tx watch dog function. Also for
17  *                        patiently answering all those innumerable
18  *                        questions regarding the 2.6 porting issues.
19  * Stephen Hemminger    : Providing proper 2.6 porting mechanism for some
20  *                        macros available only in 2.6 Kernel.
21  * Francois Romieu      : For pointing out all code part that were
22  *                        deprecated and also styling related comments.
23  * Grant Grundler       : For helping me get rid of some Architecture
24  *                        dependent code.
25  * Christopher Hellwig  : Some more 2.6 specific issues in the driver.
26  *
27  * The module loadable parameters that are supported by the driver and a brief
28  * explanation of all the variables.
29  *
30  * rx_ring_num : This can be used to program the number of receive rings used
31  * in the driver.
32  * rx_ring_sz: This defines the number of receive blocks each ring can have.
33  *     This is also an array of size 8.
34  * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35  *              values are 1, 2.
36  * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
37  * tx_fifo_len: This too is an array of 8. Each element defines the number of
38  * Tx descriptors that can be associated with each corresponding FIFO.
39  * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40  *     2(MSI_X). Default value is '2(MSI_X)'
41  * lro: Specifies whether to enable Large Receive Offload (LRO) or not.
42  *     Possible values '1' for enable '0' for disable. Default is '0'
43  * lro_max_pkts: This parameter defines maximum number of packets can be
44  *     aggregated as a single large packet
45  * napi: This parameter used to enable/disable NAPI (polling Rx)
46  *     Possible values '1' for enable and '0' for disable. Default is '1'
47  * ufo: This parameter used to enable/disable UDP Fragmentation Offload(UFO)
48  *      Possible values '1' for enable and '0' for disable. Default is '0'
49  * vlan_tag_strip: This can be used to enable or disable vlan stripping.
50  *                 Possible values '1' for enable , '0' for disable.
51  *                 Default is '2' - which means disable in promisc mode
52  *                 and enable in non-promiscuous mode.
53  ************************************************************************/
54
55 #include <linux/module.h>
56 #include <linux/types.h>
57 #include <linux/errno.h>
58 #include <linux/ioport.h>
59 #include <linux/pci.h>
60 #include <linux/dma-mapping.h>
61 #include <linux/kernel.h>
62 #include <linux/netdevice.h>
63 #include <linux/etherdevice.h>
64 #include <linux/skbuff.h>
65 #include <linux/init.h>
66 #include <linux/delay.h>
67 #include <linux/stddef.h>
68 #include <linux/ioctl.h>
69 #include <linux/timex.h>
70 #include <linux/ethtool.h>
71 #include <linux/workqueue.h>
72 #include <linux/if_vlan.h>
73 #include <linux/ip.h>
74 #include <linux/tcp.h>
75 #include <net/tcp.h>
76
77 #include <asm/system.h>
78 #include <asm/uaccess.h>
79 #include <asm/io.h>
80 #include <asm/div64.h>
81 #include <asm/irq.h>
82
83 /* local include */
84 #include "s2io.h"
85 #include "s2io-regs.h"
86
#define DRV_VERSION "2.0.26.2"

/* S2io Driver name & version. */
static char s2io_driver_name[] = "Neterion";
static char s2io_driver_version[] = DRV_VERSION;

/*
 * Indexed by sp->rxd_mode: bytes per RxD and RxDs per block for the
 * two receive descriptor modes (1-buffer vs. 3-buffer).
 */
static int rxd_size[2] = {32,48};
static int rxd_count[2] = {127,85};
95
96 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
97 {
98         int ret;
99
100         ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
101                 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
102
103         return ret;
104 }
105
/*
 * Cards with following subsystem_id have a link state indication
 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
 * macro below identifies these cards given the subsystem_id.
 * Arguments and the whole expansion are parenthesized so the macro
 * composes safely inside larger expressions (e.g. under '!').
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
	(((dev_type) == XFRAME_I_DEVICE) ?			\
		(((((subid) >= 0x600B) && ((subid) <= 0x600D)) || \
		 (((subid) >= 0x640B) && ((subid) <= 0x640D))) ? 1 : 0) : 0)

/* Link is up only when neither remote nor local RMAC fault is flagged. */
#define LINK_IS_UP(val64) (!((val64) & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
					ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
/* Return values of rx_buffer_level(): how starved a receive ring is. */
#define PANIC	1
#define LOW	2
121 static inline int rx_buffer_level(struct s2io_nic * sp, int rxb_size, int ring)
122 {
123         struct mac_info *mac_control;
124
125         mac_control = &sp->mac_control;
126         if (rxb_size <= rxd_count[sp->rxd_mode])
127                 return PANIC;
128         else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16)
129                 return  LOW;
130         return 0;
131 }
132
133 static inline int is_s2io_card_up(const struct s2io_nic * sp)
134 {
135         return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
136 }
137
/* Ethtool related variables and Macros. */

/* Names of the self-tests this driver reports through ethtool. */
static char s2io_gstrings[][ETH_GSTRING_LEN] = {
	"Register test\t(offline)",
	"Eeprom test\t(offline)",
	"Link test\t(online)",
	"RLDRAM test\t(offline)",
	"BIST Test\t(offline)"
};
146
/*
 * ethtool statistics key names common to all (Xena/Xframe I class)
 * adapters: Tx MAC, Rx MAC and PCI transaction counters.
 * NOTE(review): order presumably must stay in sync with the routine
 * that copies the hardware counters out — verify before reordering.
 */
static char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
	{"tmac_frms"},
	{"tmac_data_octets"},
	{"tmac_drop_frms"},
	{"tmac_mcst_frms"},
	{"tmac_bcst_frms"},
	{"tmac_pause_ctrl_frms"},
	{"tmac_ttl_octets"},
	{"tmac_ucst_frms"},
	{"tmac_nucst_frms"},
	{"tmac_any_err_frms"},
	{"tmac_ttl_less_fb_octets"},
	{"tmac_vld_ip_octets"},
	{"tmac_vld_ip"},
	{"tmac_drop_ip"},
	{"tmac_icmp"},
	{"tmac_rst_tcp"},
	{"tmac_tcp"},
	{"tmac_udp"},
	{"rmac_vld_frms"},
	{"rmac_data_octets"},
	{"rmac_fcs_err_frms"},
	{"rmac_drop_frms"},
	{"rmac_vld_mcst_frms"},
	{"rmac_vld_bcst_frms"},
	{"rmac_in_rng_len_err_frms"},
	{"rmac_out_rng_len_err_frms"},
	{"rmac_long_frms"},
	{"rmac_pause_ctrl_frms"},
	{"rmac_unsup_ctrl_frms"},
	{"rmac_ttl_octets"},
	{"rmac_accepted_ucst_frms"},
	{"rmac_accepted_nucst_frms"},
	{"rmac_discarded_frms"},
	{"rmac_drop_events"},
	{"rmac_ttl_less_fb_octets"},
	{"rmac_ttl_frms"},
	{"rmac_usized_frms"},
	{"rmac_osized_frms"},
	{"rmac_frag_frms"},
	{"rmac_jabber_frms"},
	{"rmac_ttl_64_frms"},
	{"rmac_ttl_65_127_frms"},
	{"rmac_ttl_128_255_frms"},
	{"rmac_ttl_256_511_frms"},
	{"rmac_ttl_512_1023_frms"},
	{"rmac_ttl_1024_1518_frms"},
	{"rmac_ip"},
	{"rmac_ip_octets"},
	{"rmac_hdr_err_ip"},
	{"rmac_drop_ip"},
	{"rmac_icmp"},
	{"rmac_tcp"},
	{"rmac_udp"},
	{"rmac_err_drp_udp"},
	{"rmac_xgmii_err_sym"},
	{"rmac_frms_q0"},
	{"rmac_frms_q1"},
	{"rmac_frms_q2"},
	{"rmac_frms_q3"},
	{"rmac_frms_q4"},
	{"rmac_frms_q5"},
	{"rmac_frms_q6"},
	{"rmac_frms_q7"},
	{"rmac_full_q0"},
	{"rmac_full_q1"},
	{"rmac_full_q2"},
	{"rmac_full_q3"},
	{"rmac_full_q4"},
	{"rmac_full_q5"},
	{"rmac_full_q6"},
	{"rmac_full_q7"},
	{"rmac_pause_cnt"},
	{"rmac_xgmii_data_err_cnt"},
	{"rmac_xgmii_ctrl_err_cnt"},
	{"rmac_accepted_ip"},
	{"rmac_err_tcp"},
	{"rd_req_cnt"},
	{"new_rd_req_cnt"},
	{"new_rd_req_rtry_cnt"},
	{"rd_rtry_cnt"},
	{"wr_rtry_rd_ack_cnt"},
	{"wr_req_cnt"},
	{"new_wr_req_cnt"},
	{"new_wr_req_rtry_cnt"},
	{"wr_rtry_cnt"},
	{"wr_disc_cnt"},
	{"rd_rtry_wr_ack_cnt"},
	{"txp_wr_cnt"},
	{"txd_rd_cnt"},
	{"txd_wr_cnt"},
	{"rxd_rd_cnt"},
	{"rxd_wr_cnt"},
	{"txf_rd_cnt"},
	{"rxf_wr_cnt"}
};
243
/*
 * Additional ethtool statistics keys available only on the enhanced
 * (Xframe II / Herc) hardware statistics block.
 */
static char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
	{"rmac_ttl_1519_4095_frms"},
	{"rmac_ttl_4096_8191_frms"},
	{"rmac_ttl_8192_max_frms"},
	{"rmac_ttl_gt_max_frms"},
	{"rmac_osized_alt_frms"},
	{"rmac_jabber_alt_frms"},
	{"rmac_gt_max_alt_frms"},
	{"rmac_vlan_frms"},
	{"rmac_len_discard"},
	{"rmac_fcs_discard"},
	{"rmac_pf_discard"},
	{"rmac_da_discard"},
	{"rmac_red_discard"},
	{"rmac_rts_discard"},
	{"rmac_ingm_full_discard"},
	{"link_fault_cnt"}
};
262
263 static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
264         {"\n DRIVER STATISTICS"},
265         {"single_bit_ecc_errs"},
266         {"double_bit_ecc_errs"},
267         {"parity_err_cnt"},
268         {"serious_err_cnt"},
269         {"soft_reset_cnt"},
270         {"fifo_full_cnt"},
271         {"ring_0_full_cnt"},
272         {"ring_1_full_cnt"},
273         {"ring_2_full_cnt"},
274         {"ring_3_full_cnt"},
275         {"ring_4_full_cnt"},
276         {"ring_5_full_cnt"},
277         {"ring_6_full_cnt"},
278         {"ring_7_full_cnt"},
279         ("alarm_transceiver_temp_high"),
280         ("alarm_transceiver_temp_low"),
281         ("alarm_laser_bias_current_high"),
282         ("alarm_laser_bias_current_low"),
283         ("alarm_laser_output_power_high"),
284         ("alarm_laser_output_power_low"),
285         ("warn_transceiver_temp_high"),
286         ("warn_transceiver_temp_low"),
287         ("warn_laser_bias_current_high"),
288         ("warn_laser_bias_current_low"),
289         ("warn_laser_output_power_high"),
290         ("warn_laser_output_power_low"),
291         ("lro_aggregated_pkts"),
292         ("lro_flush_both_count"),
293         ("lro_out_of_sequence_pkts"),
294         ("lro_flush_due_to_max_pkts"),
295         ("lro_avg_aggr_pkts"),
296         ("mem_alloc_fail_cnt"),
297         ("pci_map_fail_cnt"),
298         ("watchdog_timer_cnt"),
299         ("mem_allocated"),
300         ("mem_freed"),
301         ("link_up_cnt"),
302         ("link_down_cnt"),
303         ("link_up_time"),
304         ("link_down_time"),
305         ("tx_tcode_buf_abort_cnt"),
306         ("tx_tcode_desc_abort_cnt"),
307         ("tx_tcode_parity_err_cnt"),
308         ("tx_tcode_link_loss_cnt"),
309         ("tx_tcode_list_proc_err_cnt"),
310         ("rx_tcode_parity_err_cnt"),
311         ("rx_tcode_abort_cnt"),
312         ("rx_tcode_parity_abort_cnt"),
313         ("rx_tcode_rda_fail_cnt"),
314         ("rx_tcode_unkn_prot_cnt"),
315         ("rx_tcode_fcs_err_cnt"),
316         ("rx_tcode_buf_size_err_cnt"),
317         ("rx_tcode_rxd_corrupt_cnt"),
318         ("rx_tcode_unkn_err_cnt"),
319         {"tda_err_cnt"},
320         {"pfc_err_cnt"},
321         {"pcc_err_cnt"},
322         {"tti_err_cnt"},
323         {"tpa_err_cnt"},
324         {"sm_err_cnt"},
325         {"lso_err_cnt"},
326         {"mac_tmac_err_cnt"},
327         {"mac_rmac_err_cnt"},
328         {"xgxs_txgxs_err_cnt"},
329         {"xgxs_rxgxs_err_cnt"},
330         {"rc_err_cnt"},
331         {"prc_pcix_err_cnt"},
332         {"rpa_err_cnt"},
333         {"rda_err_cnt"},
334         {"rti_err_cnt"},
335         {"mc_err_cnt"}
336 };
337
/*
 * Element counts of the ethtool string tables above.  Every expansion is
 * fully parenthesized so the macros behave as atomic values inside larger
 * arithmetic expressions (the old forms broke under surrounding '/','*').
 */
#define S2IO_XENA_STAT_LEN (sizeof(ethtool_xena_stats_keys) / ETH_GSTRING_LEN)
#define S2IO_ENHANCED_STAT_LEN (sizeof(ethtool_enhanced_stats_keys) / \
					ETH_GSTRING_LEN)
#define S2IO_DRIVER_STAT_LEN (sizeof(ethtool_driver_stats_keys) / ETH_GSTRING_LEN)

/* Stat counts per adapter class: Xframe II adds the enhanced block. */
#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)

#define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
#define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)

#define S2IO_TEST_LEN	(sizeof(s2io_gstrings) / ETH_GSTRING_LEN)
#define S2IO_STRINGS_LEN	(S2IO_TEST_LEN * ETH_GSTRING_LEN)
351
/*
 * Initialize and arm a kernel timer in one step.  Wrapped in
 * do { } while (0) so the macro expands to a single statement and is
 * safe inside un-braced if/else bodies (the old four bare statements
 * were not).
 */
#define S2IO_TIMER_CONF(timer, handle, arg, exp)		\
	do {							\
		init_timer(&timer);				\
		timer.function = handle;			\
		timer.data = (unsigned long) arg;		\
		mod_timer(&timer, (jiffies + exp));		\
	} while (0)
357
358 /* Add the vlan */
359 static void s2io_vlan_rx_register(struct net_device *dev,
360                                         struct vlan_group *grp)
361 {
362         struct s2io_nic *nic = dev->priv;
363         unsigned long flags;
364
365         spin_lock_irqsave(&nic->tx_lock, flags);
366         nic->vlgrp = grp;
367         spin_unlock_irqrestore(&nic->tx_lock, flags);
368 }
369
/* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not */
static int vlan_strip_flag;

/*
 * Constants to be programmed into the Xena's registers, to configure
 * the XAUI.
 */

/* Terminator value for the DTX / fix_mac configuration tables below. */
#define END_SIGN        0x0
/* XAUI DTX configuration sequence for Herc (Xframe II) adapters:
 * alternating set-address / write-data pairs, END_SIGN terminated. */
static const u64 herc_act_dtx_cfg[] = {
	/* Set address */
	0x8000051536750000ULL, 0x80000515367500E0ULL,
	/* Write data */
	0x8000051536750004ULL, 0x80000515367500E4ULL,
	/* Set address */
	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
	/* Write data */
	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
	/* Set address */
	0x801205150D440000ULL, 0x801205150D4400E0ULL,
	/* Write data */
	0x801205150D440004ULL, 0x801205150D4400E4ULL,
	/* Set address */
	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	/* Done */
	END_SIGN
};
399
/* XAUI DTX configuration sequence for Xena (Xframe I) adapters:
 * alternating set-address / write-data pairs, END_SIGN terminated. */
static const u64 xena_dtx_cfg[] = {
	/* Set address */
	0x8000051500000000ULL, 0x80000515000000E0ULL,
	/* Write data */
	0x80000515D9350004ULL, 0x80000515D93500E4ULL,
	/* Set address */
	0x8001051500000000ULL, 0x80010515000000E0ULL,
	/* Write data */
	0x80010515001E0004ULL, 0x80010515001E00E4ULL,
	/* Set address */
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	END_SIGN
};
415
/*
 * Constants for Fixing the MacAddress problem seen mostly on
 * Alpha machines.  END_SIGN terminated, like the DTX tables above.
 */
static const u64 fix_mac[] = {
	0x0060000000000000ULL, 0x0060600000000000ULL,
	0x0040600000000000ULL, 0x0000600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0000600000000000ULL,
	0x0040600000000000ULL, 0x0060600000000000ULL,
	END_SIGN
};
437
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


/* Module Loadable parameters. */
S2IO_PARM_INT(tx_fifo_num, 1);
S2IO_PARM_INT(rx_ring_num, 1);


/* Rx ring operation mode, 1 or 2 (see rx_ring_mode in the file header). */
S2IO_PARM_INT(rx_ring_mode, 1);
S2IO_PARM_INT(use_continuous_tx_intrs, 1);
S2IO_PARM_INT(rmac_pause_time, 0x100);
S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
S2IO_PARM_INT(shared_splits, 0);
S2IO_PARM_INT(tmac_util_period, 5);
S2IO_PARM_INT(rmac_util_period, 5);
S2IO_PARM_INT(bimodal, 0);
S2IO_PARM_INT(l3l4hdr_size, 128);
/* Frequency of Rx desc syncs expressed as power of 2 */
S2IO_PARM_INT(rxsync_frequency, 3);
/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
S2IO_PARM_INT(intr_type, 2);
/* Large receive offload feature */
S2IO_PARM_INT(lro, 0);
/* Max pkts to be aggregated by LRO at one time. If not specified,
 * aggregation happens until we hit max IP pkt size(64K)
 */
S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
S2IO_PARM_INT(indicate_max_pkts, 0);

/* NAPI (polled Rx) on by default; UFO off by default. */
S2IO_PARM_INT(napi, 1);
S2IO_PARM_INT(ufo, 0);
/* '1' always strip, '0' never strip; default NO_STRIP_IN_PROMISC means
 * strip only when the interface is not in promiscuous mode. */
S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);

/* Per-FIFO Tx descriptor counts: FIFO 0 has its own default, 1-7 share one. */
static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
    {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
/* Per-ring Rx block counts. */
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
/* Per-ring rts frame lengths, default 0. */
static unsigned int rts_frm_len[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = 0 };

module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);
483
/*
 * S2IO device table.
 * This table lists all the devices that this driver supports.
 */
static struct pci_device_id s2io_tbl[] __devinitdata = {
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{0,}	/* terminating entry */
};

MODULE_DEVICE_TABLE(pci, s2io_tbl);
501
/* PCI error-recovery callbacks (detect / slot reset / resume). */
static struct pci_error_handlers s2io_err_handler = {
	.error_detected = s2io_io_error_detected,
	.slot_reset = s2io_io_slot_reset,
	.resume = s2io_io_resume,
};
507
508 static struct pci_driver s2io_driver = {
509       .name = "S2IO",
510       .id_table = s2io_tbl,
511       .probe = s2io_init_nic,
512       .remove = __devexit_p(s2io_rem_nic),
513       .err_handler = &s2io_err_handler,
514 };
515
/*
 * A simplifier macro used both by init and free shared_mem Fns():
 * pages needed to hold 'len' items at 'per_each' items per page
 * (round-up division).  Arguments are parenthesized so expression
 * arguments (e.g. 'a * b') expand with the intended precedence.
 */
#define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
518
519 /**
520  * init_shared_mem - Allocation and Initialization of Memory
521  * @nic: Device private variable.
522  * Description: The function allocates all the memory areas shared
523  * between the NIC and the driver. This includes Tx descriptors,
524  * Rx descriptors and the statistics block.
525  */
526
527 static int init_shared_mem(struct s2io_nic *nic)
528 {
529         u32 size;
530         void *tmp_v_addr, *tmp_v_addr_next;
531         dma_addr_t tmp_p_addr, tmp_p_addr_next;
532         struct RxD_block *pre_rxd_blk = NULL;
533         int i, j, blk_cnt;
534         int lst_size, lst_per_page;
535         struct net_device *dev = nic->dev;
536         unsigned long tmp;
537         struct buffAdd *ba;
538
539         struct mac_info *mac_control;
540         struct config_param *config;
541         unsigned long long mem_allocated = 0;
542
543         mac_control = &nic->mac_control;
544         config = &nic->config;
545
546
547         /* Allocation and initialization of TXDLs in FIOFs */
548         size = 0;
549         for (i = 0; i < config->tx_fifo_num; i++) {
550                 size += config->tx_cfg[i].fifo_len;
551         }
552         if (size > MAX_AVAILABLE_TXDS) {
553                 DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
554                 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
555                 return -EINVAL;
556         }
557
558         lst_size = (sizeof(struct TxD) * config->max_txds);
559         lst_per_page = PAGE_SIZE / lst_size;
560
561         for (i = 0; i < config->tx_fifo_num; i++) {
562                 int fifo_len = config->tx_cfg[i].fifo_len;
563                 int list_holder_size = fifo_len * sizeof(struct list_info_hold);
564                 mac_control->fifos[i].list_info = kmalloc(list_holder_size,
565                                                           GFP_KERNEL);
566                 if (!mac_control->fifos[i].list_info) {
567                         DBG_PRINT(INFO_DBG,
568                                   "Malloc failed for list_info\n");
569                         return -ENOMEM;
570                 }
571                 mem_allocated += list_holder_size;
572                 memset(mac_control->fifos[i].list_info, 0, list_holder_size);
573         }
574         for (i = 0; i < config->tx_fifo_num; i++) {
575                 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
576                                                 lst_per_page);
577                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
578                 mac_control->fifos[i].tx_curr_put_info.fifo_len =
579                     config->tx_cfg[i].fifo_len - 1;
580                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
581                 mac_control->fifos[i].tx_curr_get_info.fifo_len =
582                     config->tx_cfg[i].fifo_len - 1;
583                 mac_control->fifos[i].fifo_no = i;
584                 mac_control->fifos[i].nic = nic;
585                 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
586
587                 for (j = 0; j < page_num; j++) {
588                         int k = 0;
589                         dma_addr_t tmp_p;
590                         void *tmp_v;
591                         tmp_v = pci_alloc_consistent(nic->pdev,
592                                                      PAGE_SIZE, &tmp_p);
593                         if (!tmp_v) {
594                                 DBG_PRINT(INFO_DBG,
595                                           "pci_alloc_consistent ");
596                                 DBG_PRINT(INFO_DBG, "failed for TxDL\n");
597                                 return -ENOMEM;
598                         }
599                         /* If we got a zero DMA address(can happen on
600                          * certain platforms like PPC), reallocate.
601                          * Store virtual address of page we don't want,
602                          * to be freed later.
603                          */
604                         if (!tmp_p) {
605                                 mac_control->zerodma_virt_addr = tmp_v;
606                                 DBG_PRINT(INIT_DBG,
607                                 "%s: Zero DMA address for TxDL. ", dev->name);
608                                 DBG_PRINT(INIT_DBG,
609                                 "Virtual address %p\n", tmp_v);
610                                 tmp_v = pci_alloc_consistent(nic->pdev,
611                                                      PAGE_SIZE, &tmp_p);
612                                 if (!tmp_v) {
613                                         DBG_PRINT(INFO_DBG,
614                                           "pci_alloc_consistent ");
615                                         DBG_PRINT(INFO_DBG, "failed for TxDL\n");
616                                         return -ENOMEM;
617                                 }
618                                 mem_allocated += PAGE_SIZE;
619                         }
620                         while (k < lst_per_page) {
621                                 int l = (j * lst_per_page) + k;
622                                 if (l == config->tx_cfg[i].fifo_len)
623                                         break;
624                                 mac_control->fifos[i].list_info[l].list_virt_addr =
625                                     tmp_v + (k * lst_size);
626                                 mac_control->fifos[i].list_info[l].list_phy_addr =
627                                     tmp_p + (k * lst_size);
628                                 k++;
629                         }
630                 }
631         }
632
633         nic->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
634         if (!nic->ufo_in_band_v)
635                 return -ENOMEM;
636          mem_allocated += (size * sizeof(u64));
637
638         /* Allocation and initialization of RXDs in Rings */
639         size = 0;
640         for (i = 0; i < config->rx_ring_num; i++) {
641                 if (config->rx_cfg[i].num_rxd %
642                     (rxd_count[nic->rxd_mode] + 1)) {
643                         DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
644                         DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
645                                   i);
646                         DBG_PRINT(ERR_DBG, "RxDs per Block");
647                         return FAILURE;
648                 }
649                 size += config->rx_cfg[i].num_rxd;
650                 mac_control->rings[i].block_count =
651                         config->rx_cfg[i].num_rxd /
652                         (rxd_count[nic->rxd_mode] + 1 );
653                 mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
654                         mac_control->rings[i].block_count;
655         }
656         if (nic->rxd_mode == RXD_MODE_1)
657                 size = (size * (sizeof(struct RxD1)));
658         else
659                 size = (size * (sizeof(struct RxD3)));
660
661         for (i = 0; i < config->rx_ring_num; i++) {
662                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
663                 mac_control->rings[i].rx_curr_get_info.offset = 0;
664                 mac_control->rings[i].rx_curr_get_info.ring_len =
665                     config->rx_cfg[i].num_rxd - 1;
666                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
667                 mac_control->rings[i].rx_curr_put_info.offset = 0;
668                 mac_control->rings[i].rx_curr_put_info.ring_len =
669                     config->rx_cfg[i].num_rxd - 1;
670                 mac_control->rings[i].nic = nic;
671                 mac_control->rings[i].ring_no = i;
672
673                 blk_cnt = config->rx_cfg[i].num_rxd /
674                                 (rxd_count[nic->rxd_mode] + 1);
675                 /*  Allocating all the Rx blocks */
676                 for (j = 0; j < blk_cnt; j++) {
677                         struct rx_block_info *rx_blocks;
678                         int l;
679
680                         rx_blocks = &mac_control->rings[i].rx_blocks[j];
681                         size = SIZE_OF_BLOCK; //size is always page size
682                         tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
683                                                           &tmp_p_addr);
684                         if (tmp_v_addr == NULL) {
685                                 /*
686                                  * In case of failure, free_shared_mem()
687                                  * is called, which should free any
688                                  * memory that was alloced till the
689                                  * failure happened.
690                                  */
691                                 rx_blocks->block_virt_addr = tmp_v_addr;
692                                 return -ENOMEM;
693                         }
694                         mem_allocated += size;
695                         memset(tmp_v_addr, 0, size);
696                         rx_blocks->block_virt_addr = tmp_v_addr;
697                         rx_blocks->block_dma_addr = tmp_p_addr;
698                         rx_blocks->rxds = kmalloc(sizeof(struct rxd_info)*
699                                                   rxd_count[nic->rxd_mode],
700                                                   GFP_KERNEL);
701                         if (!rx_blocks->rxds)
702                                 return -ENOMEM;
703                         mem_allocated += 
704                         (sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
705                         for (l=0; l<rxd_count[nic->rxd_mode];l++) {
706                                 rx_blocks->rxds[l].virt_addr =
707                                         rx_blocks->block_virt_addr +
708                                         (rxd_size[nic->rxd_mode] * l);
709                                 rx_blocks->rxds[l].dma_addr =
710                                         rx_blocks->block_dma_addr +
711                                         (rxd_size[nic->rxd_mode] * l);
712                         }
713                 }
714                 /* Interlinking all Rx Blocks */
715                 for (j = 0; j < blk_cnt; j++) {
716                         tmp_v_addr =
717                                 mac_control->rings[i].rx_blocks[j].block_virt_addr;
718                         tmp_v_addr_next =
719                                 mac_control->rings[i].rx_blocks[(j + 1) %
720                                               blk_cnt].block_virt_addr;
721                         tmp_p_addr =
722                                 mac_control->rings[i].rx_blocks[j].block_dma_addr;
723                         tmp_p_addr_next =
724                                 mac_control->rings[i].rx_blocks[(j + 1) %
725                                               blk_cnt].block_dma_addr;
726
727                         pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
728                         pre_rxd_blk->reserved_2_pNext_RxD_block =
729                             (unsigned long) tmp_v_addr_next;
730                         pre_rxd_blk->pNext_RxD_Blk_physical =
731                             (u64) tmp_p_addr_next;
732                 }
733         }
734         if (nic->rxd_mode == RXD_MODE_3B) {
735                 /*
736                  * Allocation of Storages for buffer addresses in 2BUFF mode
737                  * and the buffers as well.
738                  */
739                 for (i = 0; i < config->rx_ring_num; i++) {
740                         blk_cnt = config->rx_cfg[i].num_rxd /
741                            (rxd_count[nic->rxd_mode]+ 1);
742                         mac_control->rings[i].ba =
743                                 kmalloc((sizeof(struct buffAdd *) * blk_cnt),
744                                      GFP_KERNEL);
745                         if (!mac_control->rings[i].ba)
746                                 return -ENOMEM;
747                         mem_allocated +=(sizeof(struct buffAdd *) * blk_cnt);
748                         for (j = 0; j < blk_cnt; j++) {
749                                 int k = 0;
750                                 mac_control->rings[i].ba[j] =
751                                         kmalloc((sizeof(struct buffAdd) *
752                                                 (rxd_count[nic->rxd_mode] + 1)),
753                                                 GFP_KERNEL);
754                                 if (!mac_control->rings[i].ba[j])
755                                         return -ENOMEM;
756                                 mem_allocated += (sizeof(struct buffAdd) *  \
757                                         (rxd_count[nic->rxd_mode] + 1));
758                                 while (k != rxd_count[nic->rxd_mode]) {
759                                         ba = &mac_control->rings[i].ba[j][k];
760
761                                         ba->ba_0_org = (void *) kmalloc
762                                             (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
763                                         if (!ba->ba_0_org)
764                                                 return -ENOMEM;
765                                         mem_allocated += 
766                                                 (BUF0_LEN + ALIGN_SIZE);
767                                         tmp = (unsigned long)ba->ba_0_org;
768                                         tmp += ALIGN_SIZE;
769                                         tmp &= ~((unsigned long) ALIGN_SIZE);
770                                         ba->ba_0 = (void *) tmp;
771
772                                         ba->ba_1_org = (void *) kmalloc
773                                             (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
774                                         if (!ba->ba_1_org)
775                                                 return -ENOMEM;
776                                         mem_allocated 
777                                                 += (BUF1_LEN + ALIGN_SIZE);
778                                         tmp = (unsigned long) ba->ba_1_org;
779                                         tmp += ALIGN_SIZE;
780                                         tmp &= ~((unsigned long) ALIGN_SIZE);
781                                         ba->ba_1 = (void *) tmp;
782                                         k++;
783                                 }
784                         }
785                 }
786         }
787
788         /* Allocation and initialization of Statistics block */
789         size = sizeof(struct stat_block);
790         mac_control->stats_mem = pci_alloc_consistent
791             (nic->pdev, size, &mac_control->stats_mem_phy);
792
793         if (!mac_control->stats_mem) {
794                 /*
795                  * In case of failure, free_shared_mem() is called, which
796                  * should free any memory that was alloced till the
797                  * failure happened.
798                  */
799                 return -ENOMEM;
800         }
801         mem_allocated += size;
802         mac_control->stats_mem_sz = size;
803
804         tmp_v_addr = mac_control->stats_mem;
805         mac_control->stats_info = (struct stat_block *) tmp_v_addr;
806         memset(tmp_v_addr, 0, size);
807         DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
808                   (unsigned long long) tmp_p_addr);
809         mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
810         return SUCCESS;
811 }
812
813 /**
814  * free_shared_mem - Free the allocated Memory
815  * @nic:  Device private variable.
816  * Description: This function is to free all memory locations allocated by
817  * the init_shared_mem() function and return it to the kernel.
818  */
819
820 static void free_shared_mem(struct s2io_nic *nic)
821 {
822         int i, j, blk_cnt, size;
823         u32 ufo_size = 0;
824         void *tmp_v_addr;
825         dma_addr_t tmp_p_addr;
826         struct mac_info *mac_control;
827         struct config_param *config;
828         int lst_size, lst_per_page;
829         struct net_device *dev;
830         int page_num = 0;
831
832         if (!nic)
833                 return;
834
835         dev = nic->dev;
836
837         mac_control = &nic->mac_control;
838         config = &nic->config;
839
840         lst_size = (sizeof(struct TxD) * config->max_txds);
841         lst_per_page = PAGE_SIZE / lst_size;
842
843         for (i = 0; i < config->tx_fifo_num; i++) {
844                 ufo_size += config->tx_cfg[i].fifo_len;
845                 page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
846                                                         lst_per_page);
847                 for (j = 0; j < page_num; j++) {
848                         int mem_blks = (j * lst_per_page);
849                         if (!mac_control->fifos[i].list_info)
850                                 return;
851                         if (!mac_control->fifos[i].list_info[mem_blks].
852                                  list_virt_addr)
853                                 break;
854                         pci_free_consistent(nic->pdev, PAGE_SIZE,
855                                             mac_control->fifos[i].
856                                             list_info[mem_blks].
857                                             list_virt_addr,
858                                             mac_control->fifos[i].
859                                             list_info[mem_blks].
860                                             list_phy_addr);
861                         nic->mac_control.stats_info->sw_stat.mem_freed 
862                                                 += PAGE_SIZE;
863                 }
864                 /* If we got a zero DMA address during allocation,
865                  * free the page now
866                  */
867                 if (mac_control->zerodma_virt_addr) {
868                         pci_free_consistent(nic->pdev, PAGE_SIZE,
869                                             mac_control->zerodma_virt_addr,
870                                             (dma_addr_t)0);
871                         DBG_PRINT(INIT_DBG,
872                                 "%s: Freeing TxDL with zero DMA addr. ",
873                                 dev->name);
874                         DBG_PRINT(INIT_DBG, "Virtual address %p\n",
875                                 mac_control->zerodma_virt_addr);
876                         nic->mac_control.stats_info->sw_stat.mem_freed 
877                                                 += PAGE_SIZE;
878                 }
879                 kfree(mac_control->fifos[i].list_info);
880                 nic->mac_control.stats_info->sw_stat.mem_freed += 
881                 (nic->config.tx_cfg[i].fifo_len *sizeof(struct list_info_hold));
882         }
883
884         size = SIZE_OF_BLOCK;
885         for (i = 0; i < config->rx_ring_num; i++) {
886                 blk_cnt = mac_control->rings[i].block_count;
887                 for (j = 0; j < blk_cnt; j++) {
888                         tmp_v_addr = mac_control->rings[i].rx_blocks[j].
889                                 block_virt_addr;
890                         tmp_p_addr = mac_control->rings[i].rx_blocks[j].
891                                 block_dma_addr;
892                         if (tmp_v_addr == NULL)
893                                 break;
894                         pci_free_consistent(nic->pdev, size,
895                                             tmp_v_addr, tmp_p_addr);
896                         nic->mac_control.stats_info->sw_stat.mem_freed += size;
897                         kfree(mac_control->rings[i].rx_blocks[j].rxds);
898                         nic->mac_control.stats_info->sw_stat.mem_freed += 
899                         ( sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
900                 }
901         }
902
903         if (nic->rxd_mode == RXD_MODE_3B) {
904                 /* Freeing buffer storage addresses in 2BUFF mode. */
905                 for (i = 0; i < config->rx_ring_num; i++) {
906                         blk_cnt = config->rx_cfg[i].num_rxd /
907                             (rxd_count[nic->rxd_mode] + 1);
908                         for (j = 0; j < blk_cnt; j++) {
909                                 int k = 0;
910                                 if (!mac_control->rings[i].ba[j])
911                                         continue;
912                                 while (k != rxd_count[nic->rxd_mode]) {
913                                         struct buffAdd *ba =
914                                                 &mac_control->rings[i].ba[j][k];
915                                         kfree(ba->ba_0_org);
916                                         nic->mac_control.stats_info->sw_stat.\
917                                         mem_freed += (BUF0_LEN + ALIGN_SIZE);
918                                         kfree(ba->ba_1_org);
919                                         nic->mac_control.stats_info->sw_stat.\
920                                         mem_freed += (BUF1_LEN + ALIGN_SIZE);
921                                         k++;
922                                 }
923                                 kfree(mac_control->rings[i].ba[j]);
924                                 nic->mac_control.stats_info->sw_stat.mem_freed +=
925                                         (sizeof(struct buffAdd) *
926                                         (rxd_count[nic->rxd_mode] + 1));
927                         }
928                         kfree(mac_control->rings[i].ba);
929                         nic->mac_control.stats_info->sw_stat.mem_freed += 
930                         (sizeof(struct buffAdd *) * blk_cnt);
931                 }
932         }
933
934         if (mac_control->stats_mem) {
935                 pci_free_consistent(nic->pdev,
936                                     mac_control->stats_mem_sz,
937                                     mac_control->stats_mem,
938                                     mac_control->stats_mem_phy);
939                 nic->mac_control.stats_info->sw_stat.mem_freed += 
940                         mac_control->stats_mem_sz;
941         }
942         if (nic->ufo_in_band_v) {
943                 kfree(nic->ufo_in_band_v);
944                 nic->mac_control.stats_info->sw_stat.mem_freed 
945                         += (ufo_size * sizeof(u64));
946         }
947 }
948
949 /**
950  * s2io_verify_pci_mode -
951  */
952
953 static int s2io_verify_pci_mode(struct s2io_nic *nic)
954 {
955         struct XENA_dev_config __iomem *bar0 = nic->bar0;
956         register u64 val64 = 0;
957         int     mode;
958
959         val64 = readq(&bar0->pci_mode);
960         mode = (u8)GET_PCI_MODE(val64);
961
962         if ( val64 & PCI_MODE_UNKNOWN_MODE)
963                 return -1;      /* Unknown PCI mode */
964         return mode;
965 }
966
967 #define NEC_VENID   0x1033
968 #define NEC_DEVID   0x0125
969 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
970 {
971         struct pci_dev *tdev = NULL;
972         while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
973                 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
974                         if (tdev->bus == s2io_pdev->bus->parent)
975                                 pci_dev_put(tdev);
976                                 return 1;
977                 }
978         }
979         return 0;
980 }
981
/* Bus clock (MHz) indexed by the decoded PCI mode; read-only lookup table. */
static const int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
983 /**
984  * s2io_print_pci_mode -
985  */
986 static int s2io_print_pci_mode(struct s2io_nic *nic)
987 {
988         struct XENA_dev_config __iomem *bar0 = nic->bar0;
989         register u64 val64 = 0;
990         int     mode;
991         struct config_param *config = &nic->config;
992
993         val64 = readq(&bar0->pci_mode);
994         mode = (u8)GET_PCI_MODE(val64);
995
996         if ( val64 & PCI_MODE_UNKNOWN_MODE)
997                 return -1;      /* Unknown PCI mode */
998
999         config->bus_speed = bus_speed[mode];
1000
1001         if (s2io_on_nec_bridge(nic->pdev)) {
1002                 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
1003                                                         nic->dev->name);
1004                 return mode;
1005         }
1006
1007         if (val64 & PCI_MODE_32_BITS) {
1008                 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
1009         } else {
1010                 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
1011         }
1012
1013         switch(mode) {
1014                 case PCI_MODE_PCI_33:
1015                         DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
1016                         break;
1017                 case PCI_MODE_PCI_66:
1018                         DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
1019                         break;
1020                 case PCI_MODE_PCIX_M1_66:
1021                         DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
1022                         break;
1023                 case PCI_MODE_PCIX_M1_100:
1024                         DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
1025                         break;
1026                 case PCI_MODE_PCIX_M1_133:
1027                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
1028                         break;
1029                 case PCI_MODE_PCIX_M2_66:
1030                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
1031                         break;
1032                 case PCI_MODE_PCIX_M2_100:
1033                         DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
1034                         break;
1035                 case PCI_MODE_PCIX_M2_133:
1036                         DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
1037                         break;
1038                 default:
1039                         return -1;      /* Unsupported bus speed */
1040         }
1041
1042         return mode;
1043 }
1044
1045 /**
1046  *  init_nic - Initialization of hardware
1047  *  @nic: device peivate variable
1048  *  Description: The function sequentially configures every block
1049  *  of the H/W from their reset values.
1050  *  Return Value:  SUCCESS on success and
1051  *  '-1' on failure (endian settings incorrect).
1052  */
1053
1054 static int init_nic(struct s2io_nic *nic)
1055 {
1056         struct XENA_dev_config __iomem *bar0 = nic->bar0;
1057         struct net_device *dev = nic->dev;
1058         register u64 val64 = 0;
1059         void __iomem *add;
1060         u32 time;
1061         int i, j;
1062         struct mac_info *mac_control;
1063         struct config_param *config;
1064         int dtx_cnt = 0;
1065         unsigned long long mem_share;
1066         int mem_size;
1067
1068         mac_control = &nic->mac_control;
1069         config = &nic->config;
1070
1071         /* to set the swapper controle on the card */
1072         if(s2io_set_swapper(nic)) {
1073                 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
1074                 return -1;
1075         }
1076
1077         /*
1078          * Herc requires EOI to be removed from reset before XGXS, so..
1079          */
1080         if (nic->device_type & XFRAME_II_DEVICE) {
1081                 val64 = 0xA500000000ULL;
1082                 writeq(val64, &bar0->sw_reset);
1083                 msleep(500);
1084                 val64 = readq(&bar0->sw_reset);
1085         }
1086
1087         /* Remove XGXS from reset state */
1088         val64 = 0;
1089         writeq(val64, &bar0->sw_reset);
1090         msleep(500);
1091         val64 = readq(&bar0->sw_reset);
1092
1093         /*  Enable Receiving broadcasts */
1094         add = &bar0->mac_cfg;
1095         val64 = readq(&bar0->mac_cfg);
1096         val64 |= MAC_RMAC_BCAST_ENABLE;
1097         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1098         writel((u32) val64, add);
1099         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1100         writel((u32) (val64 >> 32), (add + 4));
1101
1102         /* Read registers in all blocks */
1103         val64 = readq(&bar0->mac_int_mask);
1104         val64 = readq(&bar0->mc_int_mask);
1105         val64 = readq(&bar0->xgxs_int_mask);
1106
1107         /*  Set MTU */
1108         val64 = dev->mtu;
1109         writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1110
1111         if (nic->device_type & XFRAME_II_DEVICE) {
1112                 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1113                         SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1114                                           &bar0->dtx_control, UF);
1115                         if (dtx_cnt & 0x1)
1116                                 msleep(1); /* Necessary!! */
1117                         dtx_cnt++;
1118                 }
1119         } else {
1120                 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1121                         SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1122                                           &bar0->dtx_control, UF);
1123                         val64 = readq(&bar0->dtx_control);
1124                         dtx_cnt++;
1125                 }
1126         }
1127
1128         /*  Tx DMA Initialization */
1129         val64 = 0;
1130         writeq(val64, &bar0->tx_fifo_partition_0);
1131         writeq(val64, &bar0->tx_fifo_partition_1);
1132         writeq(val64, &bar0->tx_fifo_partition_2);
1133         writeq(val64, &bar0->tx_fifo_partition_3);
1134
1135
1136         for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1137                 val64 |=
1138                     vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
1139                          13) | vBIT(config->tx_cfg[i].fifo_priority,
1140                                     ((i * 32) + 5), 3);
1141
1142                 if (i == (config->tx_fifo_num - 1)) {
1143                         if (i % 2 == 0)
1144                                 i++;
1145                 }
1146
1147                 switch (i) {
1148                 case 1:
1149                         writeq(val64, &bar0->tx_fifo_partition_0);
1150                         val64 = 0;
1151                         break;
1152                 case 3:
1153                         writeq(val64, &bar0->tx_fifo_partition_1);
1154                         val64 = 0;
1155                         break;
1156                 case 5:
1157                         writeq(val64, &bar0->tx_fifo_partition_2);
1158                         val64 = 0;
1159                         break;
1160                 case 7:
1161                         writeq(val64, &bar0->tx_fifo_partition_3);
1162                         break;
1163                 }
1164         }
1165
1166         /*
1167          * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1168          * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1169          */
1170         if ((nic->device_type == XFRAME_I_DEVICE) &&
1171                 (nic->pdev->revision < 4))
1172                 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1173
1174         val64 = readq(&bar0->tx_fifo_partition_0);
1175         DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1176                   &bar0->tx_fifo_partition_0, (unsigned long long) val64);
1177
1178         /*
1179          * Initialization of Tx_PA_CONFIG register to ignore packet
1180          * integrity checking.
1181          */
1182         val64 = readq(&bar0->tx_pa_cfg);
1183         val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
1184             TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
1185         writeq(val64, &bar0->tx_pa_cfg);
1186
1187         /* Rx DMA intialization. */
1188         val64 = 0;
1189         for (i = 0; i < config->rx_ring_num; i++) {
1190                 val64 |=
1191                     vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
1192                          3);
1193         }
1194         writeq(val64, &bar0->rx_queue_priority);
1195
1196         /*
1197          * Allocating equal share of memory to all the
1198          * configured Rings.
1199          */
1200         val64 = 0;
1201         if (nic->device_type & XFRAME_II_DEVICE)
1202                 mem_size = 32;
1203         else
1204                 mem_size = 64;
1205
1206         for (i = 0; i < config->rx_ring_num; i++) {
1207                 switch (i) {
1208                 case 0:
1209                         mem_share = (mem_size / config->rx_ring_num +
1210                                      mem_size % config->rx_ring_num);
1211                         val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1212                         continue;
1213                 case 1:
1214                         mem_share = (mem_size / config->rx_ring_num);
1215                         val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1216                         continue;
1217                 case 2:
1218                         mem_share = (mem_size / config->rx_ring_num);
1219                         val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1220                         continue;
1221                 case 3:
1222                         mem_share = (mem_size / config->rx_ring_num);
1223                         val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1224                         continue;
1225                 case 4:
1226                         mem_share = (mem_size / config->rx_ring_num);
1227                         val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1228                         continue;
1229                 case 5:
1230                         mem_share = (mem_size / config->rx_ring_num);
1231                         val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1232                         continue;
1233                 case 6:
1234                         mem_share = (mem_size / config->rx_ring_num);
1235                         val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1236                         continue;
1237                 case 7:
1238                         mem_share = (mem_size / config->rx_ring_num);
1239                         val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1240                         continue;
1241                 }
1242         }
1243         writeq(val64, &bar0->rx_queue_cfg);
1244
1245         /*
1246          * Filling Tx round robin registers
1247          * as per the number of FIFOs
1248          */
1249         switch (config->tx_fifo_num) {
1250         case 1:
1251                 val64 = 0x0000000000000000ULL;
1252                 writeq(val64, &bar0->tx_w_round_robin_0);
1253                 writeq(val64, &bar0->tx_w_round_robin_1);
1254                 writeq(val64, &bar0->tx_w_round_robin_2);
1255                 writeq(val64, &bar0->tx_w_round_robin_3);
1256                 writeq(val64, &bar0->tx_w_round_robin_4);
1257                 break;
1258         case 2:
1259                 val64 = 0x0000010000010000ULL;
1260                 writeq(val64, &bar0->tx_w_round_robin_0);
1261                 val64 = 0x0100000100000100ULL;
1262                 writeq(val64, &bar0->tx_w_round_robin_1);
1263                 val64 = 0x0001000001000001ULL;
1264                 writeq(val64, &bar0->tx_w_round_robin_2);
1265                 val64 = 0x0000010000010000ULL;
1266                 writeq(val64, &bar0->tx_w_round_robin_3);
1267                 val64 = 0x0100000000000000ULL;
1268                 writeq(val64, &bar0->tx_w_round_robin_4);
1269                 break;
1270         case 3:
1271                 val64 = 0x0001000102000001ULL;
1272                 writeq(val64, &bar0->tx_w_round_robin_0);
1273                 val64 = 0x0001020000010001ULL;
1274                 writeq(val64, &bar0->tx_w_round_robin_1);
1275                 val64 = 0x0200000100010200ULL;
1276                 writeq(val64, &bar0->tx_w_round_robin_2);
1277                 val64 = 0x0001000102000001ULL;
1278                 writeq(val64, &bar0->tx_w_round_robin_3);
1279                 val64 = 0x0001020000000000ULL;
1280                 writeq(val64, &bar0->tx_w_round_robin_4);
1281                 break;
1282         case 4:
1283                 val64 = 0x0001020300010200ULL;
1284                 writeq(val64, &bar0->tx_w_round_robin_0);
1285                 val64 = 0x0100000102030001ULL;
1286                 writeq(val64, &bar0->tx_w_round_robin_1);
1287                 val64 = 0x0200010000010203ULL;
1288                 writeq(val64, &bar0->tx_w_round_robin_2);
1289                 val64 = 0x0001020001000001ULL;
1290                 writeq(val64, &bar0->tx_w_round_robin_3);
1291                 val64 = 0x0203000100000000ULL;
1292                 writeq(val64, &bar0->tx_w_round_robin_4);
1293                 break;
1294         case 5:
1295                 val64 = 0x0001000203000102ULL;
1296                 writeq(val64, &bar0->tx_w_round_robin_0);
1297                 val64 = 0x0001020001030004ULL;
1298                 writeq(val64, &bar0->tx_w_round_robin_1);
1299                 val64 = 0x0001000203000102ULL;
1300                 writeq(val64, &bar0->tx_w_round_robin_2);
1301                 val64 = 0x0001020001030004ULL;
1302                 writeq(val64, &bar0->tx_w_round_robin_3);
1303                 val64 = 0x0001000000000000ULL;
1304                 writeq(val64, &bar0->tx_w_round_robin_4);
1305                 break;
1306         case 6:
1307                 val64 = 0x0001020304000102ULL;
1308                 writeq(val64, &bar0->tx_w_round_robin_0);
1309                 val64 = 0x0304050001020001ULL;
1310                 writeq(val64, &bar0->tx_w_round_robin_1);
1311                 val64 = 0x0203000100000102ULL;
1312                 writeq(val64, &bar0->tx_w_round_robin_2);
1313                 val64 = 0x0304000102030405ULL;
1314                 writeq(val64, &bar0->tx_w_round_robin_3);
1315                 val64 = 0x0001000200000000ULL;
1316                 writeq(val64, &bar0->tx_w_round_robin_4);
1317                 break;
1318         case 7:
1319                 val64 = 0x0001020001020300ULL;
1320                 writeq(val64, &bar0->tx_w_round_robin_0);
1321                 val64 = 0x0102030400010203ULL;
1322                 writeq(val64, &bar0->tx_w_round_robin_1);
1323                 val64 = 0x0405060001020001ULL;
1324                 writeq(val64, &bar0->tx_w_round_robin_2);
1325                 val64 = 0x0304050000010200ULL;
1326                 writeq(val64, &bar0->tx_w_round_robin_3);
1327                 val64 = 0x0102030000000000ULL;
1328                 writeq(val64, &bar0->tx_w_round_robin_4);
1329                 break;
1330         case 8:
1331                 val64 = 0x0001020300040105ULL;
1332                 writeq(val64, &bar0->tx_w_round_robin_0);
1333                 val64 = 0x0200030106000204ULL;
1334                 writeq(val64, &bar0->tx_w_round_robin_1);
1335                 val64 = 0x0103000502010007ULL;
1336                 writeq(val64, &bar0->tx_w_round_robin_2);
1337                 val64 = 0x0304010002060500ULL;
1338                 writeq(val64, &bar0->tx_w_round_robin_3);
1339                 val64 = 0x0103020400000000ULL;
1340                 writeq(val64, &bar0->tx_w_round_robin_4);
1341                 break;
1342         }
1343
1344         /* Enable all configured Tx FIFO partitions */
1345         val64 = readq(&bar0->tx_fifo_partition_0);
1346         val64 |= (TX_FIFO_PARTITION_EN);
1347         writeq(val64, &bar0->tx_fifo_partition_0);
1348
1349         /* Filling the Rx round robin registers as per the
1350          * number of Rings and steering based on QoS.
1351          */
1352         switch (config->rx_ring_num) {
1353         case 1:
1354                 val64 = 0x8080808080808080ULL;
1355                 writeq(val64, &bar0->rts_qos_steering);
1356                 break;
1357         case 2:
1358                 val64 = 0x0000010000010000ULL;
1359                 writeq(val64, &bar0->rx_w_round_robin_0);
1360                 val64 = 0x0100000100000100ULL;
1361                 writeq(val64, &bar0->rx_w_round_robin_1);
1362                 val64 = 0x0001000001000001ULL;
1363                 writeq(val64, &bar0->rx_w_round_robin_2);
1364                 val64 = 0x0000010000010000ULL;
1365                 writeq(val64, &bar0->rx_w_round_robin_3);
1366                 val64 = 0x0100000000000000ULL;
1367                 writeq(val64, &bar0->rx_w_round_robin_4);
1368
1369                 val64 = 0x8080808040404040ULL;
1370                 writeq(val64, &bar0->rts_qos_steering);
1371                 break;
1372         case 3:
1373                 val64 = 0x0001000102000001ULL;
1374                 writeq(val64, &bar0->rx_w_round_robin_0);
1375                 val64 = 0x0001020000010001ULL;
1376                 writeq(val64, &bar0->rx_w_round_robin_1);
1377                 val64 = 0x0200000100010200ULL;
1378                 writeq(val64, &bar0->rx_w_round_robin_2);
1379                 val64 = 0x0001000102000001ULL;
1380                 writeq(val64, &bar0->rx_w_round_robin_3);
1381                 val64 = 0x0001020000000000ULL;
1382                 writeq(val64, &bar0->rx_w_round_robin_4);
1383
1384                 val64 = 0x8080804040402020ULL;
1385                 writeq(val64, &bar0->rts_qos_steering);
1386                 break;
1387         case 4:
1388                 val64 = 0x0001020300010200ULL;
1389                 writeq(val64, &bar0->rx_w_round_robin_0);
1390                 val64 = 0x0100000102030001ULL;
1391                 writeq(val64, &bar0->rx_w_round_robin_1);
1392                 val64 = 0x0200010000010203ULL;
1393                 writeq(val64, &bar0->rx_w_round_robin_2);
1394                 val64 = 0x0001020001000001ULL;
1395                 writeq(val64, &bar0->rx_w_round_robin_3);
1396                 val64 = 0x0203000100000000ULL;
1397                 writeq(val64, &bar0->rx_w_round_robin_4);
1398
1399                 val64 = 0x8080404020201010ULL;
1400                 writeq(val64, &bar0->rts_qos_steering);
1401                 break;
1402         case 5:
1403                 val64 = 0x0001000203000102ULL;
1404                 writeq(val64, &bar0->rx_w_round_robin_0);
1405                 val64 = 0x0001020001030004ULL;
1406                 writeq(val64, &bar0->rx_w_round_robin_1);
1407                 val64 = 0x0001000203000102ULL;
1408                 writeq(val64, &bar0->rx_w_round_robin_2);
1409                 val64 = 0x0001020001030004ULL;
1410                 writeq(val64, &bar0->rx_w_round_robin_3);
1411                 val64 = 0x0001000000000000ULL;
1412                 writeq(val64, &bar0->rx_w_round_robin_4);
1413
1414                 val64 = 0x8080404020201008ULL;
1415                 writeq(val64, &bar0->rts_qos_steering);
1416                 break;
1417         case 6:
1418                 val64 = 0x0001020304000102ULL;
1419                 writeq(val64, &bar0->rx_w_round_robin_0);
1420                 val64 = 0x0304050001020001ULL;
1421                 writeq(val64, &bar0->rx_w_round_robin_1);
1422                 val64 = 0x0203000100000102ULL;
1423                 writeq(val64, &bar0->rx_w_round_robin_2);
1424                 val64 = 0x0304000102030405ULL;
1425                 writeq(val64, &bar0->rx_w_round_robin_3);
1426                 val64 = 0x0001000200000000ULL;
1427                 writeq(val64, &bar0->rx_w_round_robin_4);
1428
1429                 val64 = 0x8080404020100804ULL;
1430                 writeq(val64, &bar0->rts_qos_steering);
1431                 break;
1432         case 7:
1433                 val64 = 0x0001020001020300ULL;
1434                 writeq(val64, &bar0->rx_w_round_robin_0);
1435                 val64 = 0x0102030400010203ULL;
1436                 writeq(val64, &bar0->rx_w_round_robin_1);
1437                 val64 = 0x0405060001020001ULL;
1438                 writeq(val64, &bar0->rx_w_round_robin_2);
1439                 val64 = 0x0304050000010200ULL;
1440                 writeq(val64, &bar0->rx_w_round_robin_3);
1441                 val64 = 0x0102030000000000ULL;
1442                 writeq(val64, &bar0->rx_w_round_robin_4);
1443
1444                 val64 = 0x8080402010080402ULL;
1445                 writeq(val64, &bar0->rts_qos_steering);
1446                 break;
1447         case 8:
1448                 val64 = 0x0001020300040105ULL;
1449                 writeq(val64, &bar0->rx_w_round_robin_0);
1450                 val64 = 0x0200030106000204ULL;
1451                 writeq(val64, &bar0->rx_w_round_robin_1);
1452                 val64 = 0x0103000502010007ULL;
1453                 writeq(val64, &bar0->rx_w_round_robin_2);
1454                 val64 = 0x0304010002060500ULL;
1455                 writeq(val64, &bar0->rx_w_round_robin_3);
1456                 val64 = 0x0103020400000000ULL;
1457                 writeq(val64, &bar0->rx_w_round_robin_4);
1458
1459                 val64 = 0x8040201008040201ULL;
1460                 writeq(val64, &bar0->rts_qos_steering);
1461                 break;
1462         }
1463
1464         /* UDP Fix */
1465         val64 = 0;
1466         for (i = 0; i < 8; i++)
1467                 writeq(val64, &bar0->rts_frm_len_n[i]);
1468
1469         /* Set the default rts frame length for the rings configured */
1470         val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1471         for (i = 0 ; i < config->rx_ring_num ; i++)
1472                 writeq(val64, &bar0->rts_frm_len_n[i]);
1473
1474         /* Set the frame length for the configured rings
1475          * desired by the user
1476          */
1477         for (i = 0; i < config->rx_ring_num; i++) {
1478                 /* If rts_frm_len[i] == 0 then it is assumed that user not
1479                  * specified frame length steering.
1480                  * If the user provides the frame length then program
1481                  * the rts_frm_len register for those values or else
1482                  * leave it as it is.
1483                  */
1484                 if (rts_frm_len[i] != 0) {
1485                         writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1486                                 &bar0->rts_frm_len_n[i]);
1487                 }
1488         }
1489         
1490         /* Disable differentiated services steering logic */
1491         for (i = 0; i < 64; i++) {
1492                 if (rts_ds_steer(nic, i, 0) == FAILURE) {
1493                         DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
1494                                 dev->name);
1495                         DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
1496                         return FAILURE;
1497                 }
1498         }
1499
1500         /* Program statistics memory */
1501         writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1502
1503         if (nic->device_type == XFRAME_II_DEVICE) {
1504                 val64 = STAT_BC(0x320);
1505                 writeq(val64, &bar0->stat_byte_cnt);
1506         }
1507
1508         /*
1509          * Initializing the sampling rate for the device to calculate the
1510          * bandwidth utilization.
1511          */
1512         val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1513             MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1514         writeq(val64, &bar0->mac_link_util);
1515
1516
1517         /*
1518          * Initializing the Transmit and Receive Traffic Interrupt
1519          * Scheme.
1520          */
1521         /*
1522          * TTI Initialization. Default Tx timer gets us about
1523          * 250 interrupts per sec. Continuous interrupts are enabled
1524          * by default.
1525          */
1526         if (nic->device_type == XFRAME_II_DEVICE) {
1527                 int count = (nic->config.bus_speed * 125)/2;
1528                 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1529         } else {
1530
1531                 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1532         }
1533         val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1534             TTI_DATA1_MEM_TX_URNG_B(0x10) |
1535             TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1536                 if (use_continuous_tx_intrs)
1537                         val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1538         writeq(val64, &bar0->tti_data1_mem);
1539
1540         val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1541             TTI_DATA2_MEM_TX_UFC_B(0x20) |
1542             TTI_DATA2_MEM_TX_UFC_C(0x40) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1543         writeq(val64, &bar0->tti_data2_mem);
1544
1545         val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1546         writeq(val64, &bar0->tti_command_mem);
1547
1548         /*
1549          * Once the operation completes, the Strobe bit of the command
1550          * register will be reset. We poll for this particular condition
1551          * We wait for a maximum of 500ms for the operation to complete,
1552          * if it's not complete by then we return error.
1553          */
1554         time = 0;
1555         while (TRUE) {
1556                 val64 = readq(&bar0->tti_command_mem);
1557                 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1558                         break;
1559                 }
1560                 if (time > 10) {
1561                         DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1562                                   dev->name);
1563                         return -1;
1564                 }
1565                 msleep(50);
1566                 time++;
1567         }
1568
1569         if (nic->config.bimodal) {
1570                 int k = 0;
1571                 for (k = 0; k < config->rx_ring_num; k++) {
1572                         val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1573                         val64 |= TTI_CMD_MEM_OFFSET(0x38+k);
1574                         writeq(val64, &bar0->tti_command_mem);
1575
1576                 /*
1577                  * Once the operation completes, the Strobe bit of the command
1578                  * register will be reset. We poll for this particular condition
1579                  * We wait for a maximum of 500ms for the operation to complete,
1580                  * if it's not complete by then we return error.
1581                 */
1582                         time = 0;
1583                         while (TRUE) {
1584                                 val64 = readq(&bar0->tti_command_mem);
1585                                 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1586                                         break;
1587                                 }
1588                                 if (time > 10) {
1589                                         DBG_PRINT(ERR_DBG,
1590                                                 "%s: TTI init Failed\n",
1591                                         dev->name);
1592                                         return -1;
1593                                 }
1594                                 time++;
1595                                 msleep(50);
1596                         }
1597                 }
1598         } else {
1599
1600                 /* RTI Initialization */
1601                 if (nic->device_type == XFRAME_II_DEVICE) {
1602                         /*
1603                          * Programmed to generate Apprx 500 Intrs per
1604                          * second
1605                          */
1606                         int count = (nic->config.bus_speed * 125)/4;
1607                         val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1608                 } else {
1609                         val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1610                 }
1611                 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1612                     RTI_DATA1_MEM_RX_URNG_B(0x10) |
1613                     RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1614
1615                 writeq(val64, &bar0->rti_data1_mem);
1616
1617                 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1618                     RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1619                 if (nic->config.intr_type == MSI_X)
1620                     val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1621                                 RTI_DATA2_MEM_RX_UFC_D(0x40));
1622                 else
1623                     val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1624                                 RTI_DATA2_MEM_RX_UFC_D(0x80));
1625                 writeq(val64, &bar0->rti_data2_mem);
1626
1627                 for (i = 0; i < config->rx_ring_num; i++) {
1628                         val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1629                                         | RTI_CMD_MEM_OFFSET(i);
1630                         writeq(val64, &bar0->rti_command_mem);
1631
1632                         /*
1633                          * Once the operation completes, the Strobe bit of the
1634                          * command register will be reset. We poll for this
1635                          * particular condition. We wait for a maximum of 500ms
1636                          * for the operation to complete, if it's not complete
1637                          * by then we return error.
1638                          */
1639                         time = 0;
1640                         while (TRUE) {
1641                                 val64 = readq(&bar0->rti_command_mem);
1642                                 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1643                                         break;
1644                                 }
1645                                 if (time > 10) {
1646                                         DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1647                                                   dev->name);
1648                                         return -1;
1649                                 }
1650                                 time++;
1651                                 msleep(50);
1652                         }
1653                 }
1654         }
1655
1656         /*
1657          * Initializing proper values as Pause threshold into all
1658          * the 8 Queues on Rx side.
1659          */
1660         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1661         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1662
1663         /* Disable RMAC PAD STRIPPING */
1664         add = &bar0->mac_cfg;
1665         val64 = readq(&bar0->mac_cfg);
1666         val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1667         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1668         writel((u32) (val64), add);
1669         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1670         writel((u32) (val64 >> 32), (add + 4));
1671         val64 = readq(&bar0->mac_cfg);
1672
1673         /* Enable FCS stripping by adapter */
1674         add = &bar0->mac_cfg;
1675         val64 = readq(&bar0->mac_cfg);
1676         val64 |= MAC_CFG_RMAC_STRIP_FCS;
1677         if (nic->device_type == XFRAME_II_DEVICE)
1678                 writeq(val64, &bar0->mac_cfg);
1679         else {
1680                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1681                 writel((u32) (val64), add);
1682                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1683                 writel((u32) (val64 >> 32), (add + 4));
1684         }
1685
1686         /*
1687          * Set the time value to be inserted in the pause frame
1688          * generated by xena.
1689          */
1690         val64 = readq(&bar0->rmac_pause_cfg);
1691         val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1692         val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1693         writeq(val64, &bar0->rmac_pause_cfg);
1694
1695         /*
1696          * Set the Threshold Limit for Generating the pause frame
1697          * If the amount of data in any Queue exceeds ratio of
1698          * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1699          * pause frame is generated
1700          */
1701         val64 = 0;
1702         for (i = 0; i < 4; i++) {
1703                 val64 |=
1704                     (((u64) 0xFF00 | nic->mac_control.
1705                       mc_pause_threshold_q0q3)
1706                      << (i * 2 * 8));
1707         }
1708         writeq(val64, &bar0->mc_pause_thresh_q0q3);
1709
1710         val64 = 0;
1711         for (i = 0; i < 4; i++) {
1712                 val64 |=
1713                     (((u64) 0xFF00 | nic->mac_control.
1714                       mc_pause_threshold_q4q7)
1715                      << (i * 2 * 8));
1716         }
1717         writeq(val64, &bar0->mc_pause_thresh_q4q7);
1718
1719         /*
1720          * TxDMA will stop Read request if the number of read split has
1721          * exceeded the limit pointed by shared_splits
1722          */
1723         val64 = readq(&bar0->pic_control);
1724         val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1725         writeq(val64, &bar0->pic_control);
1726
1727         if (nic->config.bus_speed == 266) {
1728                 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1729                 writeq(0x0, &bar0->read_retry_delay);
1730                 writeq(0x0, &bar0->write_retry_delay);
1731         }
1732
1733         /*
1734          * Programming the Herc to split every write transaction
1735          * that does not start on an ADB to reduce disconnects.
1736          */
1737         if (nic->device_type == XFRAME_II_DEVICE) {
1738                 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1739                         MISC_LINK_STABILITY_PRD(3);
1740                 writeq(val64, &bar0->misc_control);
1741                 val64 = readq(&bar0->pic_control2);
1742                 val64 &= ~(BIT(13)|BIT(14)|BIT(15));
1743                 writeq(val64, &bar0->pic_control2);
1744         }
1745         if (strstr(nic->product_name, "CX4")) {
1746                 val64 = TMAC_AVG_IPG(0x17);
1747                 writeq(val64, &bar0->tmac_avg_ipg);
1748         }
1749
1750         return SUCCESS;
1751 }
1752 #define LINK_UP_DOWN_INTERRUPT          1
1753 #define MAC_RMAC_ERR_TIMER              2
1754
1755 static int s2io_link_fault_indication(struct s2io_nic *nic)
1756 {
1757         if (nic->config.intr_type != INTA)
1758                 return MAC_RMAC_ERR_TIMER;
1759         if (nic->device_type == XFRAME_II_DEVICE)
1760                 return LINK_UP_DOWN_INTERRUPT;
1761         else
1762                 return MAC_RMAC_ERR_TIMER;
1763 }
1764
1765 /**
1766  *  do_s2io_write_bits -  update alarm bits in alarm register
1767  *  @value: alarm bits
1768  *  @flag: interrupt status
1769  *  @addr: address value
1770  *  Description: update alarm bits in alarm register
1771  *  Return Value:
1772  *  NONE.
1773  */
1774 static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1775 {
1776         u64 temp64;
1777
1778         temp64 = readq(addr);
1779
1780         if(flag == ENABLE_INTRS)
1781                 temp64 &= ~((u64) value);
1782         else
1783                 temp64 |= ((u64) value);
1784         writeq(temp64, addr);
1785 }
1786
/**
 *  en_dis_err_alarms - Enable or disable the error alarm interrupt sources
 *  @nic: device private variable,
 *  @mask: bit mask selecting which interrupt blocks to act on
 *         (TX_DMA_INTR, TX_MAC_INTR, TX_XGXS_INTR, RX_DMA_INTR,
 *         RX_MAC_INTR, RX_XGXS_INTR, MC_INTR),
 *  @flag: ENABLE_INTRS or DISABLE_INTRS.
 *  Description: For every block selected in @mask, update that block's
 *  alarm mask registers via do_s2io_write_bits() and accumulate the
 *  block's top-level bit into nic->general_int_mask.
 *  Return Value: NONE.
 */
void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 gen_int_mask = 0;

	/* Tx DMA block: TDA/PFC/PCC/TTI/LSO/TPA/SM sub-block alarms */
	if (mask & TX_DMA_INTR) {

		gen_int_mask |= TXDMA_INT_M;

		do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
				TXDMA_PCC_INT | TXDMA_TTI_INT |
				TXDMA_LSO_INT | TXDMA_TPA_INT |
				TXDMA_SM_INT, flag, &bar0->txdma_int_mask);

		do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
				PFC_MISC_0_ERR | PFC_MISC_1_ERR |
				PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
				&bar0->pfc_err_mask);

		do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
				TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
				TDA_PCIX_ERR, flag, &bar0->tda_err_mask);

		do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
				PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
				PCC_N_SERR | PCC_6_COF_OV_ERR |
				PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
				PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
				PCC_TXB_ECC_SG_ERR, flag, &bar0->pcc_err_mask);

		do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
				TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);

		do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
				LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
				LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
				flag, &bar0->lso_err_mask);

		do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
				flag, &bar0->tpa_err_mask);

		do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);

	}

	/* Tx MAC block */
	if (mask & TX_MAC_INTR) {
		gen_int_mask |= TXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
				&bar0->mac_int_mask);
		do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
				TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
				TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
				flag, &bar0->mac_tmac_err_mask);
	}

	/* Tx XGXS block */
	if (mask & TX_XGXS_INTR) {
		gen_int_mask |= TXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
				&bar0->xgxs_int_mask);
		do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
				TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
				flag, &bar0->xgxs_txgxs_err_mask);
	}

	/* Rx DMA block: RC/PRC/RPA/RDA/RTI sub-block alarms */
	if (mask & RX_DMA_INTR) {
		gen_int_mask |= RXDMA_INT_M;
		do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
				RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
				flag, &bar0->rxdma_int_mask);
		do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
				RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
				RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
				RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
		do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
				PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
				PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
				&bar0->prc_pcix_err_mask);
		do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
				RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
				&bar0->rpa_err_mask);
		do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
				RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
				RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
				RDA_FRM_ECC_SG_ERR | RDA_MISC_ERR|RDA_PCIX_ERR,
				flag, &bar0->rda_err_mask);
		do_s2io_write_bits(RTI_SM_ERR_ALARM |
				RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
				flag, &bar0->rti_err_mask);
	}

	/* Rx MAC block */
	if (mask & RX_MAC_INTR) {
		gen_int_mask |= RXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
				&bar0->mac_int_mask);
		do_s2io_write_bits(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
				RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
				RMAC_DOUBLE_ECC_ERR |
				RMAC_LINK_STATE_CHANGE_INT,
				flag, &bar0->mac_rmac_err_mask);
	}

	/* Rx XGXS block */
	if (mask & RX_XGXS_INTR)
	{
		gen_int_mask |= RXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
				&bar0->xgxs_int_mask);
		do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
				&bar0->xgxs_rxgxs_err_mask);
	}

	/* Memory controller block */
	if (mask & MC_INTR) {
		gen_int_mask |= MC_INT_M;
		do_s2io_write_bits(MC_INT_MASK_MC_INT, flag, &bar0->mc_int_mask);
		do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
				MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
				&bar0->mc_err_mask);
	}
	/* Publish the accumulated top-level bits for
	 * en_dis_able_nic_intrs() to fold into general_int_mask. */
	nic->general_int_mask = gen_int_mask;

	/* Remove this line when alarm interrupts are enabled */
	nic->general_int_mask = 0;
}
/**
 *  en_dis_able_nic_intrs - Enable or Disable the interrupts
 *  @nic: device private variable,
 *  @mask: A mask indicating which Intr block must be modified and,
 *  @flag: A flag indicating whether to enable or disable the Intrs.
 *  Description: This function will either disable or enable the interrupts
 *  depending on the flag argument. The mask argument can be used to
 *  enable/disable any Intr block.
 *  Return Value: NONE.
 */

static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 temp64 = 0, intr_mask = 0;

	/* Start from the bits previously accumulated in
	 * nic->general_int_mask (see en_dis_err_alarms()). */
	intr_mask = nic->general_int_mask;

	/*  Top level interrupt classification */
	/*  PIC Interrupts */
	if (mask & TX_PIC_INTR) {
		/*  Enable PIC Intrs in the general intr mask register */
		intr_mask |= TXPIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/*
			 * If Hercules adapter enable GPIO otherwise
			 * disable all PCIX, Flash, MDIO, IIC and GPIO
			 * interrupts for now.
			 * TODO
			 */
			if (s2io_link_fault_indication(nic) ==
					LINK_UP_DOWN_INTERRUPT ) {
				do_s2io_write_bits(PIC_INT_GPIO, flag,
						&bar0->pic_int_mask);
				do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
						&bar0->gpio_int_mask);
			} else
				writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable PIC Intrs in the general
			 * intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
		}
	}

	/*  Tx traffic interrupts */
	if (mask & TX_TRAFFIC_INTR) {
		intr_mask |= TXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/*
			 * Enable all the Tx side interrupts
			 * writing 0 Enables all 64 TX interrupt levels
			 */
			writeq(0x0, &bar0->tx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Tx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
		}
	}

	/*  Rx traffic interrupts */
	if (mask & RX_TRAFFIC_INTR) {
		intr_mask |= RXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/* writing 0 Enables all 8 RX interrupt levels */
			writeq(0x0, &bar0->rx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Rx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
		}
	}

	/* Fold the accumulated bits into the top-level mask register:
	 * enabling clears only the selected bits, disabling masks all. */
	temp64 = readq(&bar0->general_int_mask);
	if (flag == ENABLE_INTRS)
		temp64 &= ~((u64) intr_mask);
	else
		temp64 = DISABLE_ALL_INTRS;
	writeq(temp64, &bar0->general_int_mask);

	/* Cache the mask the hardware actually holds now. */
	nic->general_int_mask = readq(&bar0->general_int_mask);
}
1998
1999 /**
2000  *  verify_pcc_quiescent- Checks for PCC quiescent state
2001  *  Return: 1 If PCC is quiescence
2002  *          0 If PCC is not quiescence
2003  */
2004 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
2005 {
2006         int ret = 0, herc;
2007         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2008         u64 val64 = readq(&bar0->adapter_status);
2009         
2010         herc = (sp->device_type == XFRAME_II_DEVICE);
2011
2012         if (flag == FALSE) {
2013                 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2014                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
2015                                 ret = 1;
2016                 } else {
2017                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2018                                 ret = 1;
2019                 }
2020         } else {
2021                 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2022                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
2023                              ADAPTER_STATUS_RMAC_PCC_IDLE))
2024                                 ret = 1;
2025                 } else {
2026                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
2027                              ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2028                                 ret = 1;
2029                 }
2030         }
2031
2032         return ret;
2033 }
2034 /**
2035  *  verify_xena_quiescence - Checks whether the H/W is ready
2036  *  Description: Returns whether the H/W is ready to go or not. Depending
2037  *  on whether adapter enable bit was written or not the comparison
2038  *  differs and the calling function passes the input argument flag to
2039  *  indicate this.
2040  *  Return: 1 If xena is quiescence
2041  *          0 If Xena is not quiescence
2042  */
2043
2044 static int verify_xena_quiescence(struct s2io_nic *sp)
2045 {
2046         int  mode;
2047         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2048         u64 val64 = readq(&bar0->adapter_status);
2049         mode = s2io_verify_pci_mode(sp);
2050
2051         if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2052                 DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
2053                 return 0;
2054         }
2055         if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2056         DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
2057                 return 0;
2058         }
2059         if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2060                 DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
2061                 return 0;
2062         }
2063         if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2064                 DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
2065                 return 0;
2066         }
2067         if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2068                 DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
2069                 return 0;
2070         }
2071         if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2072                 DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
2073                 return 0;
2074         }
2075         if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2076                 DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
2077                 return 0;
2078         }
2079         if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2080                 DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
2081                 return 0;
2082         }
2083
2084         /*
2085          * In PCI 33 mode, the P_PLL is not used, and therefore,
2086          * the the P_PLL_LOCK bit in the adapter_status register will
2087          * not be asserted.
2088          */
2089         if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2090                 sp->device_type == XFRAME_II_DEVICE && mode !=
2091                 PCI_MODE_PCI_33) {
2092                 DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
2093                 return 0;
2094         }
2095         if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2096                         ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2097                 DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
2098                 return 0;
2099         }
2100         return 1;
2101 }
2102
2103 /**
2104  * fix_mac_address -  Fix for Mac addr problem on Alpha platforms
2105  * @sp: Pointer to device specifc structure
2106  * Description :
2107  * New procedure to clear mac address reading  problems on Alpha platforms
2108  *
2109  */
2110
2111 static void fix_mac_address(struct s2io_nic * sp)
2112 {
2113         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2114         u64 val64;
2115         int i = 0;
2116
2117         while (fix_mac[i] != END_SIGN) {
2118                 writeq(fix_mac[i++], &bar0->gpio_control);
2119                 udelay(10);
2120                 val64 = readq(&bar0->gpio_control);
2121         }
2122 }
2123
/**
 *  start_nic - Turns the device on
 *  @nic : device private variable.
 *  Description:
 *  This function actually turns the device on. Before this  function is
 *  called,all Registers are configured from their reset states
 *  and shared memory is allocated but the NIC is still quiescent. On
 *  calling this function, the device interrupts are cleared and the NIC is
 *  literally switched on by writing into the adapter control register.
 *  Return Value:
 *  SUCCESS on success and -1 on failure.
 */

static int start_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	u16 subid, i;
	struct mac_info *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/*  PRC Initialization and configuration */
	for (i = 0; i < config->rx_ring_num; i++) {
		/* Point each Receive Protocol Channel at the first Rx
		 * descriptor block of its ring. */
		writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
		       &bar0->prc_rxd0_n[i]);

		/* Enable receive, select the ring mode matching the
		 * configured RxD mode, and program the RxD backoff
		 * interval. */
		val64 = readq(&bar0->prc_ctrl_n[i]);
		if (nic->config.bimodal)
			val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
		if (nic->rxd_mode == RXD_MODE_1)
			val64 |= PRC_CTRL_RC_ENABLED;
		else
			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
		if (nic->device_type == XFRAME_II_DEVICE)
			val64 |= PRC_CTRL_GROUP_READS;
		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
		writeq(val64, &bar0->prc_ctrl_n[i]);
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
		writeq(val64, &bar0->rx_pa_cfg);
	}

	if (vlan_tag_strip == 0) {
		/* Module parameter asked for VLAN tags to be left in the
		 * frame: turn off hardware stripping. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
		writeq(val64, &bar0->rx_pa_cfg);
		vlan_strip_flag = 0;
	}

	/*
	 * Enabling MC-RLDRAM. After enabling the device, we timeout
	 * for around 100ms, which is approximately the time required
	 * for the device to be ready for operation.
	 */
	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
	val64 = readq(&bar0->mc_rldram_mrs);

	msleep(100);	/* Delay by around 100 ms. */

	/* NOTE(review): the code below clears ADAPTER_ECC_EN, i.e. it
	 * DISABLES ECC protection, although the original comment here
	 * said "Enabling ECC Protection" — confirm which behavior is
	 * actually intended. */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/*
	 * Verify if the device is ready to be enabled, if so enable
	 * it.
	 */
	val64 = readq(&bar0->adapter_status);
	if (!verify_xena_quiescence(nic)) {
		DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
		DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
			  (unsigned long long) val64);
		return FAILURE;
	}

	/*
	 * With some switches, link might be already up at this point.
	 * Because of this weird behavior, when we enable laser,
	 * we may not get link. We need to handle this. We cannot
	 * figure out which switch is misbehaving. So we are forced to
	 * make a global change.
	 */

	/* Enabling Laser. */
	val64 = readq(&bar0->adapter_control);
	val64 |= ADAPTER_EOI_TX_ON;
	writeq(val64, &bar0->adapter_control);

	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Dont see link state interrupts initally on some switches,
		 * so directly scheduling the link state task here.
		 */
		schedule_work(&nic->set_link_task);
	}
	/* SXE-002: Initialize link and activity LED */
	subid = nic->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (nic->device_type == XFRAME_I_DEVICE)) {
		/* Magic register sequence for the LED workaround; 0x2700
		 * is an undocumented offset into BAR0. */
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	return SUCCESS;
}
2244 /**
2245  * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2246  */
2247 static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, struct \
2248                                         TxD *txdlp, int get_off)
2249 {
2250         struct s2io_nic *nic = fifo_data->nic;
2251         struct sk_buff *skb;
2252         struct TxD *txds;
2253         u16 j, frg_cnt;
2254
2255         txds = txdlp;
2256         if (txds->Host_Control == (u64)(long)nic->ufo_in_band_v) {
2257                 pci_unmap_single(nic->pdev, (dma_addr_t)
2258                         txds->Buffer_Pointer, sizeof(u64),
2259                         PCI_DMA_TODEVICE);
2260                 txds++;
2261         }
2262
2263         skb = (struct sk_buff *) ((unsigned long)
2264                         txds->Host_Control);
2265         if (!skb) {
2266                 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2267                 return NULL;
2268         }
2269         pci_unmap_single(nic->pdev, (dma_addr_t)
2270                          txds->Buffer_Pointer,
2271                          skb->len - skb->data_len,
2272                          PCI_DMA_TODEVICE);
2273         frg_cnt = skb_shinfo(skb)->nr_frags;
2274         if (frg_cnt) {
2275                 txds++;
2276                 for (j = 0; j < frg_cnt; j++, txds++) {
2277                         skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2278                         if (!txds->Buffer_Pointer)
2279                                 break;
2280                         pci_unmap_page(nic->pdev, (dma_addr_t)
2281                                         txds->Buffer_Pointer,
2282                                        frag->size, PCI_DMA_TODEVICE);
2283                 }
2284         }
2285         memset(txdlp,0, (sizeof(struct TxD) * fifo_data->max_txds));
2286         return(skb);
2287 }
2288
2289 /**
2290  *  free_tx_buffers - Free all queued Tx buffers
2291  *  @nic : device private variable.
2292  *  Description:
2293  *  Free all queued Tx buffers.
2294  *  Return Value: void
2295 */
2296
2297 static void free_tx_buffers(struct s2io_nic *nic)
2298 {
2299         struct net_device *dev = nic->dev;
2300         struct sk_buff *skb;
2301         struct TxD *txdp;
2302         int i, j;
2303         struct mac_info *mac_control;
2304         struct config_param *config;
2305         int cnt = 0;
2306
2307         mac_control = &nic->mac_control;
2308         config = &nic->config;
2309
2310         for (i = 0; i < config->tx_fifo_num; i++) {
2311                 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
2312                         txdp = (struct TxD *) \
2313                         mac_control->fifos[i].list_info[j].list_virt_addr;
2314                         skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2315                         if (skb) {
2316                                 nic->mac_control.stats_info->sw_stat.mem_freed 
2317                                         += skb->truesize;
2318                                 dev_kfree_skb(skb);
2319                                 cnt++;
2320                         }
2321                 }
2322                 DBG_PRINT(INTR_DBG,
2323                           "%s:forcibly freeing %d skbs on FIFO%d\n",
2324                           dev->name, cnt, i);
2325                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2326                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2327         }
2328 }
2329
2330 /**
2331  *   stop_nic -  To stop the nic
2332  *   @nic ; device private variable.
2333  *   Description:
2334  *   This function does exactly the opposite of what the start_nic()
2335  *   function does. This function is called to stop the device.
2336  *   Return Value:
2337  *   void.
2338  */
2339
2340 static void stop_nic(struct s2io_nic *nic)
2341 {
2342         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2343         register u64 val64 = 0;
2344         u16 interruptible;
2345         struct mac_info *mac_control;
2346         struct config_param *config;
2347
2348         mac_control = &nic->mac_control;
2349         config = &nic->config;
2350
2351         /*  Disable all interrupts */
2352         en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2353         interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2354         interruptible |= TX_PIC_INTR;
2355         en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2356
2357         /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2358         val64 = readq(&bar0->adapter_control);
2359         val64 &= ~(ADAPTER_CNTL_EN);
2360         writeq(val64, &bar0->adapter_control);
2361 }
2362
/**
 *  fill_rx_buffers - Allocates the Rx side skbs
 *  @nic:  device private variable
 *  @ring_no: ring number
 *  Description:
 *  The function allocates Rx side skbs and puts the physical
 *  address of these buffers into the RxD buffer pointers, so that the NIC
 *  can DMA the received frame into these locations.
 *  The NIC supports 3 receive modes, viz
 *  1. single buffer,
 *  2. three buffer and
 *  3. Five buffer modes.
 *  Each mode defines how many fragments the received frame will be split
 *  up into by the NIC. The frame is split into L3 header, L4 Header,
 *  L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
 *  is split into 3 fragments. As of now only single buffer mode is
 *  supported.
 *   Return Value:
 *  SUCCESS on success or an appropriate -ve value on failure.
 */

static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
{
	struct net_device *dev = nic->dev;
	struct sk_buff *skb;
	struct RxD_t *rxdp;
	int off, off1, size, block_no, block_no1;
	u32 alloc_tab = 0;
	u32 alloc_cnt;
	struct mac_info *mac_control;
	struct config_param *config;
	u64 tmp;
	struct buffAdd *ba;
	unsigned long flags;
	struct RxD_t *first_rxdp = NULL;
	u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;
	struct swStat *stats = &nic->mac_control.stats_info->sw_stat;

	mac_control = &nic->mac_control;
	config = &nic->config;
	/* Number of buffers to post = ring capacity minus buffers already
	 * owned by the adapter. */
	alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
	    atomic_read(&nic->rx_bufs_left[ring_no]);

	/* Snapshot the get (consumer) side so we can detect a full ring. */
	block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
	off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
	while (alloc_tab < alloc_cnt) {
		block_no = mac_control->rings[ring_no].rx_curr_put_info.
		    block_index;
		off = mac_control->rings[ring_no].rx_curr_put_info.offset;

		rxdp = mac_control->rings[ring_no].
				rx_blocks[block_no].rxds[off].virt_addr;

		/* Put pointer caught up with the get pointer on a
		 * descriptor that still holds an skb: ring is full. */
		if ((block_no == block_no1) && (off == off1) &&
					(rxdp->Host_Control)) {
			DBG_PRINT(INTR_DBG, "%s: Get and Put",
				  dev->name);
			DBG_PRINT(INTR_DBG, " info equated\n");
			goto end;
		}
		/* End of a descriptor block: wrap to the next block
		 * (and back to block 0 at the end of the ring). */
		if (off && (off == rxd_count[nic->rxd_mode])) {
			mac_control->rings[ring_no].rx_curr_put_info.
			    block_index++;
			if (mac_control->rings[ring_no].rx_curr_put_info.
			    block_index == mac_control->rings[ring_no].
					block_count)
				mac_control->rings[ring_no].rx_curr_put_info.
					block_index = 0;
			block_no = mac_control->rings[ring_no].
					rx_curr_put_info.block_index;
			if (off == rxd_count[nic->rxd_mode])
				off = 0;
			mac_control->rings[ring_no].rx_curr_put_info.
				offset = off;
			rxdp = mac_control->rings[ring_no].
				rx_blocks[block_no].block_virt_addr;
			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
				  dev->name, rxdp);
		}
		/* Record the absolute put position; put_lock is only
		 * needed in the non-NAPI (interrupt-racing) case. */
		if(!napi) {
			spin_lock_irqsave(&nic->put_lock, flags);
			mac_control->rings[ring_no].put_pos =
			(block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
			spin_unlock_irqrestore(&nic->put_lock, flags);
		} else {
			mac_control->rings[ring_no].put_pos =
			(block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
		}
		/* Descriptor still owned by the adapter (and marked
		 * in-use via BIT(0) in 3B mode): nothing more to post. */
		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
			((nic->rxd_mode == RXD_MODE_3B) &&
				(rxdp->Control_2 & BIT(0)))) {
			mac_control->rings[ring_no].rx_curr_put_info.
					offset = off;
			goto end;
		}
		/* calculate size of skb based on ring mode */
		size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
				HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
		if (nic->rxd_mode == RXD_MODE_1)
			size += NET_IP_ALIGN;
		else
			size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;

		/* allocate skb */
		skb = dev_alloc_skb(size);
		if(!skb) {
			DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
			DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n");
			/* Hand the batch posted so far to the adapter
			 * before bailing out. */
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			nic->mac_control.stats_info->sw_stat. \
				mem_alloc_fail_cnt++;
			return -ENOMEM ;
		}
		nic->mac_control.stats_info->sw_stat.mem_allocated
			+= skb->truesize;
		if (nic->rxd_mode == RXD_MODE_1) {
			/* 1 buffer mode - normal operation mode */
			rxdp1 = (struct RxD1*)rxdp;
			memset(rxdp, 0, sizeof(struct RxD1));
			skb_reserve(skb, NET_IP_ALIGN);
			rxdp1->Buffer0_ptr = pci_map_single
			    (nic->pdev, skb->data, size - NET_IP_ALIGN,
				PCI_DMA_FROMDEVICE);
			if( (rxdp1->Buffer0_ptr == 0) ||
				(rxdp1->Buffer0_ptr ==
				DMA_ERROR_CODE))
				goto pci_map_failed;

			rxdp->Control_2 =
				SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);

		} else if (nic->rxd_mode == RXD_MODE_3B) {
			/*
			 * 2 buffer mode -
			 * 2 buffer mode provides 128
			 * byte aligned receive buffers.
			 */

			rxdp3 = (struct RxD3*)rxdp;
			/* save buffer pointers to avoid frequent dma mapping */
			Buffer0_ptr = rxdp3->Buffer0_ptr;
			Buffer1_ptr = rxdp3->Buffer1_ptr;
			memset(rxdp, 0, sizeof(struct RxD3));
			/* restore the buffer pointers for dma sync*/
			rxdp3->Buffer0_ptr = Buffer0_ptr;
			rxdp3->Buffer1_ptr = Buffer1_ptr;

			ba = &mac_control->rings[ring_no].ba[block_no][off];
			skb_reserve(skb, BUF0_LEN);
			/* Align skb->data up to the next ALIGN_SIZE
			 * boundary. */
			tmp = (u64)(unsigned long) skb->data;
			tmp += ALIGN_SIZE;
			tmp &= ~ALIGN_SIZE;
			skb->data = (void *) (unsigned long)tmp;
			skb_reset_tail_pointer(skb);

			/* Buffer0 is mapped once and only re-synced on
			 * descriptor reuse. */
			if (!(rxdp3->Buffer0_ptr))
				rxdp3->Buffer0_ptr =
				   pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
					   PCI_DMA_FROMDEVICE);
			else
				pci_dma_sync_single_for_device(nic->pdev,
				(dma_addr_t) rxdp3->Buffer0_ptr,
				    BUF0_LEN, PCI_DMA_FROMDEVICE);
			if( (rxdp3->Buffer0_ptr == 0) ||
				(rxdp3->Buffer0_ptr == DMA_ERROR_CODE))
				goto pci_map_failed;

			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
			if (nic->rxd_mode == RXD_MODE_3B) {
				/* Two buffer mode */

				/*
				 * Buffer2 will have L3/L4 header plus
				 * L4 payload
				 */
				rxdp3->Buffer2_ptr = pci_map_single
				(nic->pdev, skb->data, dev->mtu + 4,
						PCI_DMA_FROMDEVICE);

				if( (rxdp3->Buffer2_ptr == 0) ||
					(rxdp3->Buffer2_ptr == DMA_ERROR_CODE))
					goto pci_map_failed;

				/* Buffer1 is a 1-byte dummy in 2-buffer
				 * mode (size set below). */
				rxdp3->Buffer1_ptr =
						pci_map_single(nic->pdev,
						ba->ba_1, BUF1_LEN,
						PCI_DMA_FROMDEVICE);
				if( (rxdp3->Buffer1_ptr == 0) ||
					(rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
					/* Undo the Buffer2 mapping before
					 * bailing out. */
					pci_unmap_single
						(nic->pdev,
						(dma_addr_t)rxdp3->Buffer2_ptr,
						dev->mtu + 4,
						PCI_DMA_FROMDEVICE);
					goto pci_map_failed;
				}
				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
								(dev->mtu + 4);
			}
			rxdp->Control_2 |= BIT(0);
		}
		rxdp->Host_Control = (unsigned long) (skb);
		/* Give ownership to the adapter immediately except on
		 * rxsync_frequency boundaries, where the handoff is
		 * batched through first_rxdp (see below). */
		if (alloc_tab & ((1 << rxsync_frequency) - 1))
			rxdp->Control_1 |= RXD_OWN_XENA;
		off++;
		if (off == (rxd_count[nic->rxd_mode] + 1))
			off = 0;
		mac_control->rings[ring_no].rx_curr_put_info.offset = off;

		rxdp->Control_2 |= SET_RXD_MARKER;
		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			first_rxdp = rxdp;
		}
		atomic_inc(&nic->rx_bufs_left[ring_no]);
		alloc_tab++;
	}

      end:
	/* Transfer ownership of first descriptor to adapter just before
	 * exiting. Before that, use memory barrier so that ownership
	 * and other fields are seen by adapter correctly.
	 */
	if (first_rxdp) {
		wmb();
		first_rxdp->Control_1 |= RXD_OWN_XENA;
	}

	return SUCCESS;
pci_map_failed:
	/* DMA mapping failed: account for it and drop the skb. */
	stats->pci_map_fail_cnt++;
	stats->mem_freed += skb->truesize;
	dev_kfree_skb_irq(skb);
	return -ENOMEM;
}
2607
2608 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2609 {
2610         struct net_device *dev = sp->dev;
2611         int j;
2612         struct sk_buff *skb;
2613         struct RxD_t *rxdp;
2614         struct mac_info *mac_control;
2615         struct buffAdd *ba;
2616         struct RxD1 *rxdp1;
2617         struct RxD3 *rxdp3;
2618
2619         mac_control = &sp->mac_control;
2620         for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2621                 rxdp = mac_control->rings[ring_no].
2622                                 rx_blocks[blk].rxds[j].virt_addr;
2623                 skb = (struct sk_buff *)
2624                         ((unsigned long) rxdp->Host_Control);
2625                 if (!skb) {
2626                         continue;
2627                 }
2628                 if (sp->rxd_mode == RXD_MODE_1) {
2629                         rxdp1 = (struct RxD1*)rxdp;
2630                         pci_unmap_single(sp->pdev, (dma_addr_t)
2631                                 rxdp1->Buffer0_ptr,
2632                                 dev->mtu +
2633                                 HEADER_ETHERNET_II_802_3_SIZE
2634                                 + HEADER_802_2_SIZE +
2635                                 HEADER_SNAP_SIZE,
2636                                 PCI_DMA_FROMDEVICE);
2637                         memset(rxdp, 0, sizeof(struct RxD1));
2638                 } else if(sp->rxd_mode == RXD_MODE_3B) {
2639                         rxdp3 = (struct RxD3*)rxdp;
2640                         ba = &mac_control->rings[ring_no].
2641                                 ba[blk][j];
2642                         pci_unmap_single(sp->pdev, (dma_addr_t)
2643                                 rxdp3->Buffer0_ptr,
2644                                 BUF0_LEN,
2645                                 PCI_DMA_FROMDEVICE);
2646                         pci_unmap_single(sp->pdev, (dma_addr_t)
2647                                 rxdp3->Buffer1_ptr,
2648                                 BUF1_LEN,
2649                                 PCI_DMA_FROMDEVICE);
2650                         pci_unmap_single(sp->pdev, (dma_addr_t)
2651                                 rxdp3->Buffer2_ptr,
2652                                 dev->mtu + 4,
2653                                 PCI_DMA_FROMDEVICE);
2654                         memset(rxdp, 0, sizeof(struct RxD3));
2655                 }
2656                 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
2657                 dev_kfree_skb(skb);
2658                 atomic_dec(&sp->rx_bufs_left[ring_no]);
2659         }
2660 }
2661
2662 /**
2663  *  free_rx_buffers - Frees all Rx buffers
2664  *  @sp: device private variable.
2665  *  Description:
2666  *  This function will free all Rx buffers allocated by host.
2667  *  Return Value:
2668  *  NONE.
2669  */
2670
2671 static void free_rx_buffers(struct s2io_nic *sp)
2672 {
2673         struct net_device *dev = sp->dev;
2674         int i, blk = 0, buf_cnt = 0;
2675         struct mac_info *mac_control;
2676         struct config_param *config;
2677
2678         mac_control = &sp->mac_control;
2679         config = &sp->config;
2680
2681         for (i = 0; i < config->rx_ring_num; i++) {
2682                 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2683                         free_rxd_blk(sp,i,blk);
2684
2685                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2686                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2687                 mac_control->rings[i].rx_curr_put_info.offset = 0;
2688                 mac_control->rings[i].rx_curr_get_info.offset = 0;
2689                 atomic_set(&sp->rx_bufs_left[i], 0);
2690                 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2691                           dev->name, buf_cnt, i);
2692         }
2693 }
2694
/**
 * s2io_poll - Rx interrupt handler for NAPI support
 * @napi : pointer to the napi structure.
 * @budget : The number of packets that were budgeted to be processed
 * during  one pass through the 'Poll" function.
 * Description:
 * Comes into picture only if NAPI support has been incorporated. It does
 * the same thing that rx_intr_handler does, but not in a interrupt context
 * also It will process only a given number of packets.
 * Return value:
 * 0 on success and 1 if there are No Rx packets to be processed.
 */

static int s2io_poll(struct napi_struct *napi, int budget)
{
	struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
	struct net_device *dev = nic->dev;
	int pkt_cnt = 0, org_pkts_to_process;
	struct mac_info *mac_control;
	struct config_param *config;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	int i;

	/* Nothing to do while the adapter is down (e.g. during reset). */
	if (!is_s2io_card_up(nic))
		return 0;

	mac_control = &nic->mac_control;
	config = &nic->config;

	nic->pkts_to_process = budget;
	org_pkts_to_process = nic->pkts_to_process;

	/* Acknowledge the pending Rx traffic interrupts; the readl
	 * flushes the posted PCI write. */
	writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
	readl(&bar0->rx_traffic_int);

	/* Service every Rx ring; rx_intr_handler decrements
	 * nic->pkts_to_process as packets are delivered. */
	for (i = 0; i < config->rx_ring_num; i++) {
		rx_intr_handler(&mac_control->rings[i]);
		pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
		if (!nic->pkts_to_process) {
			/* Quota for the current iteration has been met */
			goto no_rx;
		}
	}

	/* Budget not exhausted: leave polled mode and fall back to
	 * interrupt-driven operation. */
	netif_rx_complete(dev, napi);

	/* Replenish the Rx descriptors consumed above. */
	for (i = 0; i < config->rx_ring_num; i++) {
		if (fill_rx_buffers(nic, i) == -ENOMEM) {
			DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
			DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
			break;
		}
	}
	/* Re enable the Rx interrupts. */
	writeq(0x0, &bar0->rx_traffic_mask);
	readl(&bar0->rx_traffic_mask);
	return pkt_cnt;

no_rx:
	/* Quota exhausted: stay in polled mode (no netif_rx_complete,
	 * Rx interrupts remain masked); just refill the rings. */
	for (i = 0; i < config->rx_ring_num; i++) {
		if (fill_rx_buffers(nic, i) == -ENOMEM) {
			DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
			DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
			break;
		}
	}
	return pkt_cnt;
}
2763
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * s2io_netpoll - netpoll event handler entry point
 * @dev : pointer to the device structure.
 * Description:
 *      This function will be called by upper layer to check for events on the
 * interface in situations where interrupts are disabled. It is used for
 * specific in-kernel networking tasks, such as remote consoles and kernel
 * debugging over the network (example netdump in RedHat).
 */
static void s2io_netpoll(struct net_device *dev)
{
	struct s2io_nic *nic = dev->priv;
	struct mac_info *mac_control;
	struct config_param *config;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
	int i;

	/* Nothing can be done if the PCI channel is offline (EEH etc.). */
	if (pci_channel_offline(nic->pdev))
		return;

	/* Run with the device IRQ off so the real interrupt handler
	 * cannot race with this polled pass; re-enabled at the end. */
	disable_irq(dev->irq);

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* Acknowledge all pending Rx and Tx traffic interrupts. */
	writeq(val64, &bar0->rx_traffic_int);
	writeq(val64, &bar0->tx_traffic_int);

	/* we need to free up the transmitted skbufs or else netpoll will
	 * run out of skbs and will fail and eventually netpoll application such
	 * as netdump will fail.
	 */
	for (i = 0; i < config->tx_fifo_num; i++)
		tx_intr_handler(&mac_control->fifos[i]);

	/* check for received packet and indicate up to network */
	for (i = 0; i < config->rx_ring_num; i++)
		rx_intr_handler(&mac_control->rings[i]);

	/* Replenish the Rx descriptors consumed above. */
	for (i = 0; i < config->rx_ring_num; i++) {
		if (fill_rx_buffers(nic, i) == -ENOMEM) {
			DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
			DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
			break;
		}
	}
	enable_irq(dev->irq);
	return;
}
#endif
2816
/**
 *  rx_intr_handler - Rx interrupt handler
 *  @ring_data: per-ring private data (carries a pointer back to the NIC).
 *  Description:
 *  If the interrupt is because of a received frame or if the
 *  receive ring contains fresh as yet un-processed frames,this function is
 *  called. It picks out the RxD at which place the last Rx processing had
 *  stopped and sends the skb to the OSM's Rx handler and then increments
 *  the offset.
 *  Return Value:
 *  NONE.
 */
static void rx_intr_handler(struct ring_info *ring_data)
{
	struct s2io_nic *nic = ring_data->nic;
	struct net_device *dev = (struct net_device *) nic->dev;
	int get_block, put_block, put_offset;
	struct rx_curr_get_info get_info, put_info;
	struct RxD_t *rxdp;
	struct sk_buff *skb;
	int pkt_cnt = 0;
	int i;
	struct RxD1* rxdp1;
	struct RxD3* rxdp3;

	spin_lock(&nic->rx_lock);

	/* Snapshot the consumer (get) and producer (put) positions */
	get_info = ring_data->rx_curr_get_info;
	get_block = get_info.block_index;
	memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
	put_block = put_info.block_index;
	rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
	if (!napi) {
		/* Non-NAPI: put_pos is updated from interrupt-context refill,
		 * so reading it needs put_lock; under NAPI it does not. */
		spin_lock(&nic->put_lock);
		put_offset = ring_data->put_pos;
		spin_unlock(&nic->put_lock);
	} else
		put_offset = ring_data->put_pos;

	/* Process every descriptor the hardware has handed back to us */
	while (RXD_IS_UP2DT(rxdp)) {
		/*
		 * If your are next to put index then it's
		 * FIFO full condition
		 */
		if ((get_block == put_block) &&
		    (get_info.offset + 1) == put_info.offset) {
			DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name);
			break;
		}
		/* Host_Control stashes the skb pointer for this descriptor */
		skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
		if (skb == NULL) {
			DBG_PRINT(ERR_DBG, "%s: The skb is ",
				  dev->name);
			DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
			spin_unlock(&nic->rx_lock);
			return;
		}
		/* Unmap the DMA buffers according to the receive buffer mode */
		if (nic->rxd_mode == RXD_MODE_1) {
			rxdp1 = (struct RxD1*)rxdp;
			pci_unmap_single(nic->pdev, (dma_addr_t)
				rxdp1->Buffer0_ptr,
				dev->mtu +
				HEADER_ETHERNET_II_802_3_SIZE +
				HEADER_802_2_SIZE +
				HEADER_SNAP_SIZE,
				PCI_DMA_FROMDEVICE);
		} else if (nic->rxd_mode == RXD_MODE_3B) {
			/* 3-buffer mode: sync the header buffer for CPU
			 * access, unmap the payload buffer */
			rxdp3 = (struct RxD3*)rxdp;
			pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
				rxdp3->Buffer0_ptr,
				BUF0_LEN, PCI_DMA_FROMDEVICE);
			pci_unmap_single(nic->pdev, (dma_addr_t)
				rxdp3->Buffer2_ptr,
				dev->mtu + 4,
				PCI_DMA_FROMDEVICE);
		}
		prefetch(skb->data);
		rx_osm_handler(ring_data, rxdp);
		/* Advance the get pointer, wrapping into the next block when
		 * the end of the current block is reached */
		get_info.offset++;
		ring_data->rx_curr_get_info.offset = get_info.offset;
		rxdp = ring_data->rx_blocks[get_block].
				rxds[get_info.offset].virt_addr;
		if (get_info.offset == rxd_count[nic->rxd_mode]) {
			get_info.offset = 0;
			ring_data->rx_curr_get_info.offset = get_info.offset;
			get_block++;
			if (get_block == ring_data->block_count)
				get_block = 0;
			ring_data->rx_curr_get_info.block_index = get_block;
			rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
		}

		nic->pkts_to_process -= 1;
		/* Honour the NAPI budget */
		if ((napi) && (!nic->pkts_to_process))
			break;
		pkt_cnt++;
		if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
			break;
	}
	if (nic->lro) {
		/* Clear all LRO sessions before exiting */
		for (i=0; i<MAX_LRO_SESSIONS; i++) {
			struct lro *lro = &nic->lro0_n[i];
			if (lro->in_use) {
				update_L3L4_header(nic, lro);
				queue_rx_frame(lro->parent);
				clear_lro_session(lro);
			}
		}
	}

	spin_unlock(&nic->rx_lock);
}
2930
/**
 *  tx_intr_handler - Transmit interrupt handler
 *  @fifo_data: per-FIFO private data (carries a pointer back to the NIC).
 *  Description:
 *  If an interrupt was raised to indicate DMA complete of the
 *  Tx packet, this function is called. It identifies the last TxD
 *  whose buffer was freed and frees all skbs whose data have already
 *  DMA'ed into the NICs internal memory.
 *  Return Value:
 *  NONE
 */

static void tx_intr_handler(struct fifo_info *fifo_data)
{
	struct s2io_nic *nic = fifo_data->nic;
	struct net_device *dev = (struct net_device *) nic->dev;
	struct tx_curr_get_info get_info, put_info;
	struct sk_buff *skb;
	struct TxD *txdlp;
	u8 err_mask;

	get_info = fifo_data->tx_curr_get_info;
	memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
	txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
	    list_virt_addr;
	/* Reap every descriptor the NIC has released (ownership bit clear),
	 * stopping at the put pointer or at an empty slot. */
	while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
	       (get_info.offset != put_info.offset) &&
	       (txdlp->Host_Control)) {
		/* Check for TxD errors */
		if (txdlp->Control_1 & TXD_T_CODE) {
			unsigned long long err;
			err = txdlp->Control_1 & TXD_T_CODE;
			if (err & 0x1) {
				nic->mac_control.stats_info->sw_stat.
						parity_err_cnt++;
			}

			/* update t_code statistics */
			err_mask = err >> 48;
			switch(err_mask) {
				case 2:
					nic->mac_control.stats_info->sw_stat.
							tx_buf_abort_cnt++;
				break;

				case 3:
					nic->mac_control.stats_info->sw_stat.
							tx_desc_abort_cnt++;
				break;

				case 7:
					nic->mac_control.stats_info->sw_stat.
							tx_parity_err_cnt++;
				break;

				case 10:
					nic->mac_control.stats_info->sw_stat.
							tx_link_loss_cnt++;
				break;

				case 15:
					nic->mac_control.stats_info->sw_stat.
							tx_list_proc_err_cnt++;
				break;
			}
		}

		skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
		if (skb == NULL) {
			/* NOTE(review): bails out without waking a stopped
			 * queue — confirm this is the intended behaviour. */
			DBG_PRINT(ERR_DBG, "%s: Null skb ",
			__FUNCTION__);
			DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
			return;
		}

		/* Updating the statistics block */
		nic->stats.tx_bytes += skb->len;
		nic->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
		dev_kfree_skb_irq(skb);

		/* Advance the get pointer, wrapping at the FIFO length */
		get_info.offset++;
		if (get_info.offset == get_info.fifo_len + 1)
			get_info.offset = 0;
		txdlp = (struct TxD *) fifo_data->list_info
		    [get_info.offset].list_virt_addr;
		fifo_data->tx_curr_get_info.offset =
		    get_info.offset;
	}

	/* Descriptors were freed, so the queue may be restarted */
	spin_lock(&nic->tx_lock);
	if (netif_queue_stopped(dev))
		netif_wake_queue(dev);
	spin_unlock(&nic->tx_lock);
}
3025
3026 /**
3027  *  s2io_mdio_write - Function to write in to MDIO registers
3028  *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3029  *  @addr     : address value
3030  *  @value    : data value
3031  *  @dev      : pointer to net_device structure
3032  *  Description:
3033  *  This function is used to write values to the MDIO registers
3034  *  NONE
3035  */
3036 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
3037 {
3038         u64 val64 = 0x0;
3039         struct s2io_nic *sp = dev->priv;
3040         struct XENA_dev_config __iomem *bar0 = sp->bar0;
3041
3042         //address transaction
3043         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3044                         | MDIO_MMD_DEV_ADDR(mmd_type)
3045                         | MDIO_MMS_PRT_ADDR(0x0);
3046         writeq(val64, &bar0->mdio_control);
3047         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3048         writeq(val64, &bar0->mdio_control);
3049         udelay(100);
3050
3051         //Data transaction
3052         val64 = 0x0;
3053         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3054                         | MDIO_MMD_DEV_ADDR(mmd_type)
3055                         | MDIO_MMS_PRT_ADDR(0x0)
3056                         | MDIO_MDIO_DATA(value)
3057                         | MDIO_OP(MDIO_OP_WRITE_TRANS);
3058         writeq(val64, &bar0->mdio_control);
3059         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3060         writeq(val64, &bar0->mdio_control);
3061         udelay(100);
3062
3063         val64 = 0x0;
3064         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3065         | MDIO_MMD_DEV_ADDR(mmd_type)
3066         | MDIO_MMS_PRT_ADDR(0x0)
3067         | MDIO_OP(MDIO_OP_READ_TRANS);
3068         writeq(val64, &bar0->mdio_control);
3069         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3070         writeq(val64, &bar0->mdio_control);
3071         udelay(100);
3072
3073 }
3074
3075 /**
3076  *  s2io_mdio_read - Function to write in to MDIO registers
3077  *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3078  *  @addr     : address value
3079  *  @dev      : pointer to net_device structure
3080  *  Description:
3081  *  This function is used to read values to the MDIO registers
3082  *  NONE
3083  */
3084 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3085 {
3086         u64 val64 = 0x0;
3087         u64 rval64 = 0x0;
3088         struct s2io_nic *sp = dev->priv;
3089         struct XENA_dev_config __iomem *bar0 = sp->bar0;
3090
3091         /* address transaction */
3092         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3093                         | MDIO_MMD_DEV_ADDR(mmd_type)
3094                         | MDIO_MMS_PRT_ADDR(0x0);
3095         writeq(val64, &bar0->mdio_control);
3096         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3097         writeq(val64, &bar0->mdio_control);
3098         udelay(100);
3099
3100         /* Data transaction */
3101         val64 = 0x0;
3102         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3103                         | MDIO_MMD_DEV_ADDR(mmd_type)
3104                         | MDIO_MMS_PRT_ADDR(0x0)
3105                         | MDIO_OP(MDIO_OP_READ_TRANS);
3106         writeq(val64, &bar0->mdio_control);
3107         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3108         writeq(val64, &bar0->mdio_control);
3109         udelay(100);
3110
3111         /* Read the value from regs */
3112         rval64 = readq(&bar0->mdio_control);
3113         rval64 = rval64 & 0xFFFF0000;
3114         rval64 = rval64 >> 16;
3115         return rval64;
3116 }
3117 /**
3118  *  s2io_chk_xpak_counter - Function to check the status of the xpak counters
3119  *  @counter      : couter value to be updated
3120  *  @flag         : flag to indicate the status
3121  *  @type         : counter type
3122  *  Description:
3123  *  This function is to check the status of the xpak counters value
3124  *  NONE
3125  */
3126
3127 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
3128 {
3129         u64 mask = 0x3;
3130         u64 val64;
3131         int i;
3132         for(i = 0; i <index; i++)
3133                 mask = mask << 0x2;
3134
3135         if(flag > 0)
3136         {
3137                 *counter = *counter + 1;
3138                 val64 = *regs_stat & mask;
3139                 val64 = val64 >> (index * 0x2);
3140                 val64 = val64 + 1;
3141                 if(val64 == 3)
3142                 {
3143                         switch(type)
3144                         {
3145                         case 1:
3146                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3147                                           "service. Excessive temperatures may "
3148                                           "result in premature transceiver "
3149                                           "failure \n");
3150                         break;
3151                         case 2:
3152                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3153                                           "service Excessive bias currents may "
3154                                           "indicate imminent laser diode "
3155                                           "failure \n");
3156                         break;
3157                         case 3:
3158                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3159                                           "service Excessive laser output "
3160                                           "power may saturate far-end "
3161                                           "receiver\n");
3162                         break;
3163                         default:
3164                                 DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
3165                                           "type \n");
3166                         }
3167                         val64 = 0x0;
3168                 }
3169                 val64 = val64 << (index * 0x2);
3170                 *regs_stat = (*regs_stat & (~mask)) | (val64);
3171
3172         } else {
3173                 *regs_stat = *regs_stat & (~mask);
3174         }
3175 }
3176
/**
 *  s2io_updt_xpak_counter - Function to update the xpak counters
 *  @dev         : pointer to net_device struct
 *  Description:
 *  This function is to update the status of the xpak counters value
 *  NONE
 */
static void s2io_updt_xpak_counter(struct net_device *dev)
{
	u16 flag  = 0x0;
	u16 type  = 0x0;
	u16 val16 = 0x0;
	u64 val64 = 0x0;
	u64 addr  = 0x0;

	struct s2io_nic *sp = dev->priv;
	struct stat_block *stat_info = sp->mac_control.stats_info;

	/* Check the communication with the MDIO slave */
	addr = 0x0000;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
	if((val64 == 0xFFFF) || (val64 == 0x0000))
	{
		/* All-ones / all-zeros reads indicate a dead MDIO bus */
		DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
			  "Returned %llx\n", (unsigned long long)val64);
		return;
	}

	/* Check for the expected value of 2040 at PMA address 0x0000 */
	if(val64 != 0x2040)
	{
		DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
		DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
			  (unsigned long long)val64);
		return;
	}

	/* Loading the DOM register to MDIO register */
	/* NOTE(review): val16 is 0 here; writing 0 to 0xA100 presumably
	 * triggers the DOM snapshot load — confirm against XPAK MSA. */
	addr = 0xA100;
	s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	/* Reading the Alarm flags */
	addr = 0xA070;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	/* Bit 7: transceiver temperature high alarm (persisted counter) */
	flag = CHECKBIT(val64, 0x7);
	type = 1;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
				&stat_info->xpak_stat.xpak_regs_stat,
				0x0, flag, type);

	if(CHECKBIT(val64, 0x6))
		stat_info->xpak_stat.alarm_transceiver_temp_low++;

	/* Bit 3: laser bias current high alarm (persisted counter) */
	flag = CHECKBIT(val64, 0x3);
	type = 2;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
				&stat_info->xpak_stat.xpak_regs_stat,
				0x2, flag, type);

	if(CHECKBIT(val64, 0x2))
		stat_info->xpak_stat.alarm_laser_bias_current_low++;

	/* Bit 1: laser output power high alarm (persisted counter) */
	flag = CHECKBIT(val64, 0x1);
	type = 3;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
				&stat_info->xpak_stat.xpak_regs_stat,
				0x4, flag, type);

	if(CHECKBIT(val64, 0x0))
		stat_info->xpak_stat.alarm_laser_output_power_low++;

	/* Reading the Warning flags */
	addr = 0xA074;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	if(CHECKBIT(val64, 0x7))
		stat_info->xpak_stat.warn_transceiver_temp_high++;

	if(CHECKBIT(val64, 0x6))
		stat_info->xpak_stat.warn_transceiver_temp_low++;

	if(CHECKBIT(val64, 0x3))
		stat_info->xpak_stat.warn_laser_bias_current_high++;

	if(CHECKBIT(val64, 0x2))
		stat_info->xpak_stat.warn_laser_bias_current_low++;

	if(CHECKBIT(val64, 0x1))
		stat_info->xpak_stat.warn_laser_output_power_high++;

	if(CHECKBIT(val64, 0x0))
		stat_info->xpak_stat.warn_laser_output_power_low++;
}
3275
3276 /**
3277  *  wait_for_cmd_complete - waits for a command to complete.
3278  *  @sp : private member of the device structure, which is a pointer to the
3279  *  s2io_nic structure.
3280  *  Description: Function that waits for a command to Write into RMAC
3281  *  ADDR DATA registers to be completed and returns either success or
3282  *  error depending on whether the command was complete or not.
3283  *  Return value:
3284  *   SUCCESS on success and FAILURE on failure.
3285  */
3286
3287 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3288                                 int bit_state)
3289 {
3290         int ret = FAILURE, cnt = 0, delay = 1;
3291         u64 val64;
3292
3293         if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3294                 return FAILURE;
3295
3296         do {
3297                 val64 = readq(addr);
3298                 if (bit_state == S2IO_BIT_RESET) {
3299                         if (!(val64 & busy_bit)) {
3300                                 ret = SUCCESS;
3301                                 break;
3302                         }
3303                 } else {
3304                         if (!(val64 & busy_bit)) {
3305                                 ret = SUCCESS;
3306                                 break;
3307                         }
3308                 }
3309
3310                 if(in_interrupt())
3311                         mdelay(delay);
3312                 else
3313                         msleep(delay);
3314
3315                 if (++cnt >= 10)
3316                         delay = 50;
3317         } while (cnt < 20);
3318         return ret;
3319 }
3320 /*
3321  * check_pci_device_id - Checks if the device id is supported
3322  * @id : device id
3323  * Description: Function to check if the pci device id is supported by driver.
3324  * Return value: Actual device id if supported else PCI_ANY_ID
3325  */
3326 static u16 check_pci_device_id(u16 id)
3327 {
3328         switch (id) {
3329         case PCI_DEVICE_ID_HERC_WIN:
3330         case PCI_DEVICE_ID_HERC_UNI:
3331                 return XFRAME_II_DEVICE;
3332         case PCI_DEVICE_ID_S2IO_UNI:
3333         case PCI_DEVICE_ID_S2IO_WIN:
3334                 return XFRAME_I_DEVICE;
3335         default:
3336                 return PCI_ANY_ID;
3337         }
3338 }
3339
/**
 *  s2io_reset - Resets the card.
 *  @sp : private member of the device structure.
 *  Description: Function to Reset the card. This function then also
 *  restores the previously saved PCI configuration space registers as
 *  the card reset also resets the configuration space.
 *  Return value:
 *  void.
 */

static void s2io_reset(struct s2io_nic * sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	u16 subid, pci_cmd;
	int i;
	u16 val16;
	unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
	unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;

	DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
			__FUNCTION__, sp->dev->name);

	/* Back up  the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));

	/* Trigger the global software reset */
	val64 = SW_RESET_ALL;
	writeq(val64, &bar0->sw_reset);
	/* CX4 transceivers need extra settle time after reset */
	if (strstr(sp->product_name, "CX4")) {
		msleep(750);
	}
	msleep(250);
	/* Re-apply config space until the device id reads back sanely,
	 * indicating the chip has come out of reset */
	for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {

		/* Restore the PCI state saved during initialization. */
		pci_restore_state(sp->pdev);
		pci_read_config_word(sp->pdev, 0x2, &val16);
		if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
			break;
		msleep(200);
	}

	if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
		DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__);
	}

	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);

	s2io_init_pci(sp);

	/* Set swapper to enable I/O register access */
	s2io_set_swapper(sp);

	/* Restore the MSIX table entries from local variables */
	restore_xmsi_data(sp);

	/* Clear certain PCI/PCI-X fields after reset */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Clear "detected parity error" bit */
		pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);

		/* Clearing PCIX Ecc status register */
		pci_write_config_dword(sp->pdev, 0x68, 0x7C);

		/* Clearing PCI_STATUS error reflected here */
		writeq(BIT(62), &bar0->txpic_int_reg);
	}

	/* Reset device statistics maintained by OS */
	memset(&sp->stats, 0, sizeof (struct net_device_stats));

	up_cnt = sp->mac_control.stats_info->sw_stat.link_up_cnt;
	down_cnt = sp->mac_control.stats_info->sw_stat.link_down_cnt;
	up_time = sp->mac_control.stats_info->sw_stat.link_up_time;
	down_time = sp->mac_control.stats_info->sw_stat.link_down_time;
	reset_cnt = sp->mac_control.stats_info->sw_stat.soft_reset_cnt;
	mem_alloc_cnt = sp->mac_control.stats_info->sw_stat.mem_allocated;
	mem_free_cnt = sp->mac_control.stats_info->sw_stat.mem_freed;
	watchdog_cnt = sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt;
	/* save link up/down time/cnt, reset/memory/watchdog cnt */
	memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
	/* restore link up/down time/cnt, reset/memory/watchdog cnt */
	sp->mac_control.stats_info->sw_stat.link_up_cnt = up_cnt;
	sp->mac_control.stats_info->sw_stat.link_down_cnt = down_cnt;
	sp->mac_control.stats_info->sw_stat.link_up_time = up_time;
	sp->mac_control.stats_info->sw_stat.link_down_time = down_time;
	sp->mac_control.stats_info->sw_stat.soft_reset_cnt = reset_cnt;
	sp->mac_control.stats_info->sw_stat.mem_allocated = mem_alloc_cnt;
	sp->mac_control.stats_info->sw_stat.mem_freed = mem_free_cnt;
	sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt = watchdog_cnt;

	/* SXE-002: Configure link and activity LED to turn it off */
	subid = sp->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (sp->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	/*
	 * Clear spurious ECC interrupts that would have occured on
	 * XFRAME II cards after reset.
	 */
	if (sp->device_type == XFRAME_II_DEVICE) {
		val64 = readq(&bar0->pcc_err_reg);
		writeq(val64, &bar0->pcc_err_reg);
	}

	/* restore the previously assigned mac address */
	s2io_set_mac_addr(sp->dev, (u8 *)&sp->def_mac_addr[0].mac_addr);

	sp->device_enabled_once = FALSE;
}
3456
/**
 *  s2io_set_swapper - to set the swapper controle on the card
 *  @sp : private member of the device structure,
 *  pointer to the s2io_nic structure.
 *  Description: Function to set the swapper control on the card
 *  correctly depending on the 'endianness' of the system.
 *  Return value:
 *  SUCCESS on success and FAILURE on failure.
 */

static int s2io_set_swapper(struct s2io_nic * sp)
{
	struct net_device *dev = sp->dev;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64, valt, valr;

	/*
	 * Set proper endian settings and verify the same by reading
	 * the PIF Feed-back register.
	 */

	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		/* Feedback pattern wrong: probe the four read-path swapper
		 * (FE/SE) combinations until the pattern reads correctly */
		int i = 0;
		u64 value[] = { 0xC30000C3C30000C3ULL,   /* FE=1, SE=1 */
				0x8100008181000081ULL,  /* FE=1, SE=0 */
				0x4200004242000042ULL,  /* FE=0, SE=1 */
				0};                     /* FE=0, SE=0 */

		while(i<4) {
			writeq(value[i], &bar0->swapper_ctrl);
			val64 = readq(&bar0->pif_rd_swapper_fb);
			if (val64 == 0x0123456789ABCDEFULL)
				break;
			i++;
		}
		if (i == 4) {
			DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
				dev->name);
			DBG_PRINT(ERR_DBG, "feedback read %llx\n",
				(unsigned long long) val64);
			return FAILURE;
		}
		valr = value[i];
	} else {
		valr = readq(&bar0->swapper_ctrl);
	}

	/* Now verify the write path via the XMSI address register */
	valt = 0x0123456789ABCDEFULL;
	writeq(valt, &bar0->xmsi_address);
	val64 = readq(&bar0->xmsi_address);

	if(val64 != valt) {
		/* Probe the four write-path swapper combinations, keeping
		 * the read-path setting (valr) found above */
		int i = 0;
		u64 value[] = { 0x00C3C30000C3C300ULL,  /* FE=1, SE=1 */
				0x0081810000818100ULL,  /* FE=1, SE=0 */
				0x0042420000424200ULL,  /* FE=0, SE=1 */
				0};                     /* FE=0, SE=0 */

		while(i<4) {
			writeq((value[i] | valr), &bar0->swapper_ctrl);
			writeq(valt, &bar0->xmsi_address);
			val64 = readq(&bar0->xmsi_address);
			if(val64 == valt)
				break;
			i++;
		}
		if(i == 4) {
			unsigned long long x = val64;
			DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
			DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
			return FAILURE;
		}
	}
	/* Keep only the probed FE/SE bits, then set the per-path controls */
	val64 = readq(&bar0->swapper_ctrl);
	val64 &= 0xFFFF000000000000ULL;

#ifdef  __BIG_ENDIAN
	/*
	 * The device by default set to a big endian format, so a
	 * big endian driver need not set anything.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		 SWAPPER_CTRL_TXP_SE |
		 SWAPPER_CTRL_TXD_R_FE |
		 SWAPPER_CTRL_TXD_W_FE |
		 SWAPPER_CTRL_TXF_R_FE |
		 SWAPPER_CTRL_RXD_R_FE |
		 SWAPPER_CTRL_RXD_W_FE |
		 SWAPPER_CTRL_RXF_W_FE |
		 SWAPPER_CTRL_XMSI_FE |
		 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
	if (sp->config.intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#else
	/*
	 * Initially we enable all bits to make it accessible by the
	 * driver, then we selectively enable only those bits that
	 * we want to set.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		 SWAPPER_CTRL_TXP_SE |
		 SWAPPER_CTRL_TXD_R_FE |
		 SWAPPER_CTRL_TXD_R_SE |
		 SWAPPER_CTRL_TXD_W_FE |
		 SWAPPER_CTRL_TXD_W_SE |
		 SWAPPER_CTRL_TXF_R_FE |
		 SWAPPER_CTRL_RXD_R_FE |
		 SWAPPER_CTRL_RXD_R_SE |
		 SWAPPER_CTRL_RXD_W_FE |
		 SWAPPER_CTRL_RXD_W_SE |
		 SWAPPER_CTRL_RXF_W_FE |
		 SWAPPER_CTRL_XMSI_FE |
		 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
	if (sp->config.intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#endif
	val64 = readq(&bar0->swapper_ctrl);

	/*
	 * Verifying if endian settings are accurate by reading a
	 * feedback register.
	 */
	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		/* Endian settings are incorrect, calls for another dekko. */
		DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
			  dev->name);
		DBG_PRINT(ERR_DBG, "feedback read %llx\n",
			  (unsigned long long) val64);
		return FAILURE;
	}

	return SUCCESS;
}
3594
3595 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3596 {
3597         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3598         u64 val64;
3599         int ret = 0, cnt = 0;
3600
3601         do {
3602                 val64 = readq(&bar0->xmsi_access);
3603                 if (!(val64 & BIT(15)))
3604                         break;
3605                 mdelay(1);
3606                 cnt++;
3607         } while(cnt < 5);
3608         if (cnt == 5) {
3609                 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3610                 ret = 1;
3611         }
3612
3613         return ret;
3614 }
3615
3616 static void restore_xmsi_data(struct s2io_nic *nic)
3617 {
3618         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3619         u64 val64;
3620         int i;
3621
3622         for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3623                 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3624                 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3625                 val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
3626                 writeq(val64, &bar0->xmsi_access);
3627                 if (wait_for_msix_trans(nic, i)) {
3628                         DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3629                         continue;
3630                 }
3631         }
3632 }
3633
3634 static void store_xmsi_data(struct s2io_nic *nic)
3635 {
3636         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3637         u64 val64, addr, data;
3638         int i;
3639
3640         /* Store and display */
3641         for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3642                 val64 = (BIT(15) | vBIT(i, 26, 6));
3643                 writeq(val64, &bar0->xmsi_access);
3644                 if (wait_for_msix_trans(nic, i)) {
3645                         DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3646                         continue;
3647                 }
3648                 addr = readq(&bar0->xmsi_address);
3649                 data = readq(&bar0->xmsi_data);
3650                 if (addr && data) {
3651                         nic->msix_info[i].addr = addr;
3652                         nic->msix_info[i].data = data;
3653                 }
3654         }
3655 }
3656
3657 static int s2io_enable_msi_x(struct s2io_nic *nic)
3658 {
3659         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3660         u64 tx_mat, rx_mat;
3661         u16 msi_control; /* Temp variable */
3662         int ret, i, j, msix_indx = 1;
3663
3664         nic->entries = kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct msix_entry),
3665                                GFP_KERNEL);
3666         if (nic->entries == NULL) {
3667                 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \
3668                         __FUNCTION__);
3669                 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3670                 return -ENOMEM;
3671         }
3672         nic->mac_control.stats_info->sw_stat.mem_allocated 
3673                 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3674         memset(nic->entries, 0,MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3675
3676         nic->s2io_entries =
3677                 kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry),
3678                                    GFP_KERNEL);
3679         if (nic->s2io_entries == NULL) {
3680                 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", 
3681                         __FUNCTION__);
3682                 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3683                 kfree(nic->entries);
3684                 nic->mac_control.stats_info->sw_stat.mem_freed 
3685                         += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3686                 return -ENOMEM;
3687         }
3688          nic->mac_control.stats_info->sw_stat.mem_allocated 
3689                 += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3690         memset(nic->s2io_entries, 0,
3691                MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3692
3693         for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
3694                 nic->entries[i].entry = i;
3695                 nic->s2io_entries[i].entry = i;
3696                 nic->s2io_entries[i].arg = NULL;
3697                 nic->s2io_entries[i].in_use = 0;
3698         }
3699
3700         tx_mat = readq(&bar0->tx_mat0_n[0]);
3701         for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) {
3702                 tx_mat |= TX_MAT_SET(i, msix_indx);
3703                 nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
3704                 nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
3705                 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3706         }
3707         writeq(tx_mat, &bar0->tx_mat0_n[0]);
3708
3709         if (!nic->config.bimodal) {
3710                 rx_mat = readq(&bar0->rx_mat);
3711                 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
3712                         rx_mat |= RX_MAT_SET(j, msix_indx);
3713                         nic->s2io_entries[msix_indx].arg 
3714                                 = &nic->mac_control.rings[j];
3715                         nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3716                         nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3717                 }
3718                 writeq(rx_mat, &bar0->rx_mat);
3719         } else {
3720                 tx_mat = readq(&bar0->tx_mat0_n[7]);
3721                 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
3722                         tx_mat |= TX_MAT_SET(i, msix_indx);
3723                         nic->s2io_entries[msix_indx].arg 
3724                                 = &nic->mac_control.rings[j];
3725                         nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3726                         nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3727                 }
3728                 writeq(tx_mat, &bar0->tx_mat0_n[7]);
3729         }
3730
3731         nic->avail_msix_vectors = 0;
3732         ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
3733         /* We fail init if error or we get less vectors than min required */
3734         if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) {
3735                 nic->avail_msix_vectors = ret;
3736                 ret = pci_enable_msix(nic->pdev, nic->entries, ret);
3737         }
3738         if (ret) {
3739                 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
3740                 kfree(nic->entries);
3741                 nic->mac_control.stats_info->sw_stat.mem_freed 
3742                         += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3743                 kfree(nic->s2io_entries);
3744                 nic->mac_control.stats_info->sw_stat.mem_freed 
3745                 += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3746                 nic->entries = NULL;
3747                 nic->s2io_entries = NULL;
3748                 nic->avail_msix_vectors = 0;
3749                 return -ENOMEM;
3750         }
3751         if (!nic->avail_msix_vectors)
3752                 nic->avail_msix_vectors = MAX_REQUESTED_MSI_X;
3753
3754         /*
3755          * To enable MSI-X, MSI also needs to be enabled, due to a bug
3756          * in the herc NIC. (Temp change, needs to be removed later)
3757          */
3758         pci_read_config_word(nic->pdev, 0x42, &msi_control);
3759         msi_control |= 0x1; /* Enable MSI */
3760         pci_write_config_word(nic->pdev, 0x42, msi_control);
3761
3762         return 0;
3763 }
3764
3765 /* Handle software interrupt used during MSI(X) test */
3766 static irqreturn_t __devinit s2io_test_intr(int irq, void *dev_id)
3767 {
3768         struct s2io_nic *sp = dev_id;
3769
3770         sp->msi_detected = 1;
3771         wake_up(&sp->msi_wait);
3772
3773         return IRQ_HANDLED;
3774 }
3775
/* Test interrupt path by forcing a software IRQ */
3777 static int __devinit s2io_test_msi(struct s2io_nic *sp)
3778 {
3779         struct pci_dev *pdev = sp->pdev;
3780         struct XENA_dev_config __iomem *bar0 = sp->bar0;
3781         int err;
3782         u64 val64, saved64;
3783
3784         err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
3785                         sp->name, sp);
3786         if (err) {
3787                 DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
3788                        sp->dev->name, pci_name(pdev), pdev->irq);
3789                 return err;
3790         }
3791
3792         init_waitqueue_head (&sp->msi_wait);
3793         sp->msi_detected = 0;
3794
3795         saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
3796         val64 |= SCHED_INT_CTRL_ONE_SHOT;
3797         val64 |= SCHED_INT_CTRL_TIMER_EN;
3798         val64 |= SCHED_INT_CTRL_INT2MSI(1);
3799         writeq(val64, &bar0->scheduled_int_ctrl);
3800
3801         wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);
3802
3803         if (!sp->msi_detected) {
3804                 /* MSI(X) test failed, go back to INTx mode */
3805                 DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated"
3806                         "using MSI(X) during test\n", sp->dev->name,
3807                         pci_name(pdev));
3808
3809                 err = -EOPNOTSUPP;
3810         }
3811
3812         free_irq(sp->entries[1].vector, sp);
3813
3814         writeq(saved64, &bar0->scheduled_int_ctrl);
3815
3816         return err;
3817 }
3818 /* ********************************************************* *
3819  * Functions defined below concern the OS part of the driver *
3820  * ********************************************************* */
3821
3822 /**
3823  *  s2io_open - open entry point of the driver
3824  *  @dev : pointer to the device structure.
3825  *  Description:
3826  *  This function is the open entry point of the driver. It mainly calls a
3827  *  function to allocate Rx buffers and inserts them into the buffer
3828  *  descriptors and then enables the Rx part of the NIC.
3829  *  Return value:
3830  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3831  *   file on failure.
3832  */
3833
static int s2io_open(struct net_device *dev)
{
	struct s2io_nic *sp = dev->priv;
	int err = 0;

	/*
	 * Make sure you have link off by default every time
	 * Nic is initialized
	 */
	netif_carrier_off(dev);
	sp->last_link_state = 0;

	napi_enable(&sp->napi);

	/* Probe MSI-X: enable the vectors, fire a test interrupt, then
	 * roll the tables back (they are re-created later — presumably by
	 * add_isr() via s2io_card_up(); confirm).  Any failure drops the
	 * device back to legacy INTA. */
	if (sp->config.intr_type == MSI_X) {
		int ret = s2io_enable_msi_x(sp);

		if (!ret) {
			u16 msi_control;

			ret = s2io_test_msi(sp);

			/* rollback MSI-X, will re-enable during add_isr() */
			kfree(sp->entries);
			sp->mac_control.stats_info->sw_stat.mem_freed +=
				(MAX_REQUESTED_MSI_X *
				sizeof(struct msix_entry));
			kfree(sp->s2io_entries);
			sp->mac_control.stats_info->sw_stat.mem_freed +=
				(MAX_REQUESTED_MSI_X *
				sizeof(struct s2io_msix_entry));
			sp->entries = NULL;
			sp->s2io_entries = NULL;

			/* Undo the herc workaround that forces MSI on
			 * together with MSI-X (see s2io_enable_msi_x()) */
			pci_read_config_word(sp->pdev, 0x42, &msi_control);
			msi_control &= 0xFFFE; /* Disable MSI */
			pci_write_config_word(sp->pdev, 0x42, msi_control);

			pci_disable_msix(sp->pdev);

		}
		if (ret) {

			DBG_PRINT(ERR_DBG,
			  "%s: MSI-X requested but failed to enable\n",
			  dev->name);
			sp->config.intr_type = INTA;
		}
	}

	/* NAPI doesn't work well with MSI(X) */
	 if (sp->config.intr_type != INTA) {
		if(sp->config.napi)
			sp->config.napi = 0;
	}

	/* Initialize H/W and enable interrupts */
	err = s2io_card_up(sp);
	if (err) {
		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
			  dev->name);
		goto hw_init_failed;
	}

	if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
		DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
		s2io_card_down(sp);
		err = -ENODEV;
		goto hw_init_failed;
	}

	netif_start_queue(dev);
	return 0;

hw_init_failed:
	/* Common failure path: stop NAPI and release any MSI-X tables that
	 * a partial bring-up may have left allocated. */
	napi_disable(&sp->napi);
	if (sp->config.intr_type == MSI_X) {
		if (sp->entries) {
			kfree(sp->entries);
			sp->mac_control.stats_info->sw_stat.mem_freed 
			+= (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
		}
		if (sp->s2io_entries) {
			kfree(sp->s2io_entries);
			sp->mac_control.stats_info->sw_stat.mem_freed 
			+= (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
		}
	}
	return err;
}
3924
3925 /**
3926  *  s2io_close -close entry point of the driver
3927  *  @dev : device pointer.
3928  *  Description:
3929  *  This is the stop entry point of the driver. It needs to undo exactly
3930  *  whatever was done by the open entry point,thus it's usually referred to
3931  *  as the close function.Among other things this function mainly stops the
3932  *  Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3933  *  Return value:
3934  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3935  *  file on failure.
3936  */
3937
3938 static int s2io_close(struct net_device *dev)
3939 {
3940         struct s2io_nic *sp = dev->priv;
3941
3942         netif_stop_queue(dev);
3943         napi_disable(&sp->napi);
3944         /* Reset card, kill tasklet and free Tx and Rx buffers. */
3945         s2io_card_down(sp);
3946
3947         return 0;
3948 }
3949
3950 /**
 *  s2io_xmit - Tx entry point of the driver
3952  *  @skb : the socket buffer containing the Tx data.
3953  *  @dev : device pointer.
3954  *  Description :
3955  *  This function is the Tx entry point of the driver. S2IO NIC supports
3956  *  certain protocol assist features on Tx side, namely  CSO, S/G, LSO.
 *  NOTE: when the device can't queue the pkt, just the trans_start variable
 *  will not be updated.
3959  *  Return value:
3960  *  0 on success & 1 on failure.
3961  */
3962
static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct s2io_nic *sp = dev->priv;
	u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
	register u64 val64;
	struct TxD *txdp;
	struct TxFIFO_element __iomem *tx_fifo;
	unsigned long flags;
	u16 vlan_tag = 0;
	int vlan_priority = 0;
	struct mac_info *mac_control;
	struct config_param *config;
	int offload_type;
	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;

	mac_control = &sp->mac_control;
	config = &sp->config;

	DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);

	/* Empty skb: nothing to send, drop it and report success */
	if (unlikely(skb->len <= 0)) {
		DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
		dev_kfree_skb_any(skb);
		return 0;
}

	spin_lock_irqsave(&sp->tx_lock, flags);
	/* Card is being reset/taken down: drop the packet silently */
	if (!is_s2io_card_up(sp)) {
		DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
			  dev->name);
		spin_unlock_irqrestore(&sp->tx_lock, flags);
		dev_kfree_skb(skb);
		return 0;
	}

	queue = 0;
	/* Get Fifo number to Transmit based on vlan priority */
	if (sp->vlgrp && vlan_tx_tag_present(skb)) {
		vlan_tag = vlan_tx_tag_get(skb);
		/* 802.1p priority lives in the top 3 bits of the tag */
		vlan_priority = vlan_tag >> 13;
		queue = config->fifo_mapping[vlan_priority];
	}

	put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
	get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
	txdp = (struct TxD *) mac_control->fifos[queue].list_info[put_off].
		list_virt_addr;

	queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
	/* Avoid "put" pointer going beyond "get" pointer */
	/* A non-zero Host_Control means the descriptor still holds an
	 * uncompleted skb, i.e. the ring is full. */
	if (txdp->Host_Control ||
		   ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
		netif_stop_queue(dev);
		dev_kfree_skb(skb);
		spin_unlock_irqrestore(&sp->tx_lock, flags);
		return 0;
	}

	offload_type = s2io_offload_type(skb);
	/* TCP segmentation offload: program the MSS into the descriptor */
	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
		txdp->Control_1 |= TXD_TCP_LSO_EN;
		txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
	}
	/* Hardware checksum offload for IPv4/TCP/UDP */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txdp->Control_2 |=
		    (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
		     TXD_TX_CKO_UDP_EN);
	}
	txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
	txdp->Control_1 |= TXD_LIST_OWN_XENA;
	txdp->Control_2 |= config->tx_intr_type;

	/* Ask the NIC to insert the VLAN tag on the wire */
	if (sp->vlgrp && vlan_tx_tag_present(skb)) {
		txdp->Control_2 |= TXD_VLAN_ENABLE;
		txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
	}

	/* Length of the linear (non-paged) part of the skb */
	frg_len = skb->len - skb->data_len;
	if (offload_type == SKB_GSO_UDP) {
		int ufo_size;

		/* UDP fragmentation offload: descriptor 0 carries an 8-byte
		 * in-band header holding the IPv6 fragment id. */
		ufo_size = s2io_udp_mss(skb);
		ufo_size &= ~7;
		txdp->Control_1 |= TXD_UFO_EN;
		txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
		txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
#ifdef __BIG_ENDIAN
		sp->ufo_in_band_v[put_off] =
				(u64)skb_shinfo(skb)->ip6_frag_id;
#else
		sp->ufo_in_band_v[put_off] =
				(u64)skb_shinfo(skb)->ip6_frag_id << 32;
#endif
		txdp->Host_Control = (unsigned long)sp->ufo_in_band_v;
		txdp->Buffer_Pointer = pci_map_single(sp->pdev,
					sp->ufo_in_band_v,
					sizeof(u64), PCI_DMA_TODEVICE);
		if((txdp->Buffer_Pointer == 0) ||
			(txdp->Buffer_Pointer == DMA_ERROR_CODE))
			goto pci_map_failed;
		txdp++;
	}

	/* Map the linear part of the skb */
	txdp->Buffer_Pointer = pci_map_single
	    (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
	if((txdp->Buffer_Pointer == 0) ||
		(txdp->Buffer_Pointer == DMA_ERROR_CODE))
		goto pci_map_failed;

	/* Stash the skb pointer so the Tx-complete path can free it */
	txdp->Host_Control = (unsigned long) skb;
	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
	if (offload_type == SKB_GSO_UDP)
		txdp->Control_1 |= TXD_UFO_EN;

	frg_cnt = skb_shinfo(skb)->nr_frags;
	/* For fragmented SKB. */
	for (i = 0; i < frg_cnt; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		/* A '0' length fragment will be ignored */
		if (!frag->size)
			continue;
		txdp++;
		/* NOTE(review): unlike the mappings above, pci_map_page()
		 * results are not checked for DMA errors here. */
		txdp->Buffer_Pointer = (u64) pci_map_page
		    (sp->pdev, frag->page, frag->page_offset,
		     frag->size, PCI_DMA_TODEVICE);
		txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
		if (offload_type == SKB_GSO_UDP)
			txdp->Control_1 |= TXD_UFO_EN;
	}
	txdp->Control_1 |= TXD_GATHER_CODE_LAST;

	if (offload_type == SKB_GSO_UDP)
		frg_cnt++; /* as Txd0 was used for inband header */

	/* Hand the descriptor list to the FIFO and kick the doorbell */
	tx_fifo = mac_control->tx_FIFO_start[queue];
	val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
	writeq(val64, &tx_fifo->TxDL_Pointer);

	val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
		 TX_FIFO_LAST_LIST);
	if (offload_type)
		val64 |= TX_FIFO_SPECIAL_FUNC;

	writeq(val64, &tx_fifo->List_Control);

	/* Order the MMIO writes before releasing the lock */
	mmiowb();

	/* Advance the put pointer, wrapping at the end of the ring */
	put_off++;
	if (put_off == mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1)
		put_off = 0;
	mac_control->fifos[queue].tx_curr_put_info.offset = put_off;

	/* Avoid "put" pointer going beyond "get" pointer */
	if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
		DBG_PRINT(TX_DBG,
			  "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
			  put_off, get_off);
		netif_stop_queue(dev);
	}
	/* skb memory is accounted until the Tx-complete path frees it */
	mac_control->stats_info->sw_stat.mem_allocated += skb->truesize;
	dev->trans_start = jiffies;
	spin_unlock_irqrestore(&sp->tx_lock, flags);

	return 0;
pci_map_failed:
	/* DMA mapping failed: count it, stop the queue and drop the skb */
	stats->pci_map_fail_cnt++;
	netif_stop_queue(dev);
	stats->mem_freed += skb->truesize;
	dev_kfree_skb(skb);
	spin_unlock_irqrestore(&sp->tx_lock, flags);
	return 0;
}
4137
4138 static void
4139 s2io_alarm_handle(unsigned long data)
4140 {
4141         struct s2io_nic *sp = (struct s2io_nic *)data;
4142         struct net_device *dev = sp->dev;
4143
4144         s2io_handle_errors(dev);
4145         mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4146 }
4147
/*
 * Replenish the Rx buffers of ring @rng_n after interrupt processing.
 * Without LRO: refill synchronously only when the ring is critically low
 * (PANIC level), defer to the tasklet at LOW level.  With LRO enabled the
 * refill is always done inline.  Returns 0, or -1 if the PANIC-path
 * allocation ran out of memory.
 */
static int s2io_chk_rx_buffers(struct s2io_nic *sp, int rng_n)
{
	int rxb_size, level;

	if (!sp->lro) {
		rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
		level = rx_buffer_level(sp, rxb_size, rng_n);

		/* NOTE(review): TASKLET_IN_USE appears to test-and-set bit 0
		 * of tasklet_status (both branches below clear it again) —
		 * confirm against the macro's definition. */
		if ((level == PANIC) && (!TASKLET_IN_USE)) {
			int ret;
			DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
			DBG_PRINT(INTR_DBG, "PANIC levels\n");
			if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
				DBG_PRINT(INFO_DBG, "Out of memory in %s",
					  __FUNCTION__);
				clear_bit(0, (&sp->tasklet_status));
				return -1;
			}
			clear_bit(0, (&sp->tasklet_status));
		} else if (level == LOW)
			/* Not critical yet: let the tasklet do the refill */
			tasklet_schedule(&sp->task);

	} else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
			DBG_PRINT(INFO_DBG, "%s:Out of memory", sp->dev->name);
			DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
	}
	return 0;
}
4176
4177 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4178 {
4179         struct ring_info *ring = (struct ring_info *)dev_id;
4180         struct s2io_nic *sp = ring->nic;
4181
4182         if (!is_s2io_card_up(sp))
4183                 return IRQ_HANDLED;
4184
4185         rx_intr_handler(ring);
4186         s2io_chk_rx_buffers(sp, ring->ring_no);
4187
4188         return IRQ_HANDLED;
4189 }
4190
4191 static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4192 {
4193         struct fifo_info *fifo = (struct fifo_info *)dev_id;
4194         struct s2io_nic *sp = fifo->nic;
4195
4196         if (!is_s2io_card_up(sp))
4197                 return IRQ_HANDLED;
4198
4199         tx_intr_handler(fifo);
4200         return IRQ_HANDLED;
4201 }
/*
 * Handle TXPIC (GPIO) interrupts: link up/down transitions signalled
 * through the GPIO interrupt register.  Updates the carrier state via
 * s2io_link(), drives the adapter enable/LED bits and re-programs the
 * GPIO interrupt mask so only the opposite transition stays unmasked.
 */
static void s2io_txpic_intr_handle(struct s2io_nic *sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;

	val64 = readq(&bar0->pic_int_status);
	if (val64 & PIC_INT_GPIO) {
		val64 = readq(&bar0->gpio_int_reg);
		if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
		    (val64 & GPIO_INT_REG_LINK_UP)) {
			/*
			 * This is unstable state so clear both up/down
			 * interrupt and adapter to re-evaluate the link state.
			 */
			val64 |=  GPIO_INT_REG_LINK_DOWN;
			val64 |= GPIO_INT_REG_LINK_UP;
			writeq(val64, &bar0->gpio_int_reg);
			val64 = readq(&bar0->gpio_int_mask);
			/* Unmask both transitions until the state settles */
			val64 &= ~(GPIO_INT_MASK_LINK_UP |
				   GPIO_INT_MASK_LINK_DOWN);
			writeq(val64, &bar0->gpio_int_mask);
		}
		else if (val64 & GPIO_INT_REG_LINK_UP) {
			val64 = readq(&bar0->adapter_status);
				/* Enable Adapter */
			val64 = readq(&bar0->adapter_control);
			val64 |= ADAPTER_CNTL_EN;
			writeq(val64, &bar0->adapter_control);
			val64 |= ADAPTER_LED_ON;
			writeq(val64, &bar0->adapter_control);
			if (!sp->device_enabled_once)
				sp->device_enabled_once = 1;

			s2io_link(sp, LINK_UP);
			/*
			 * unmask link down interrupt and mask link-up
			 * intr
			 */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_DOWN;
			val64 |= GPIO_INT_MASK_LINK_UP;
			writeq(val64, &bar0->gpio_int_mask);

		}else if (val64 & GPIO_INT_REG_LINK_DOWN) {
			val64 = readq(&bar0->adapter_status);
			s2io_link(sp, LINK_DOWN);
			/* Link is down so unmask the link-up interrupt */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_UP;
			val64 |= GPIO_INT_MASK_LINK_DOWN;
			writeq(val64, &bar0->gpio_int_mask);

			/* turn off LED */
			val64 = readq(&bar0->adapter_control);
			val64 = val64 &(~ADAPTER_LED_ON);
			writeq(val64, &bar0->adapter_control);
		}
	}
	/* Read back the mask — result discarded; presumably acts as a
	 * posted-write flush of the MMIO writes above (confirm). */
	val64 = readq(&bar0->gpio_int_mask);
}
4262
4263 /**
 *  do_s2io_chk_alarm_bit - Check for alarm and increment the counter
4265  *  @value: alarm bits
4266  *  @addr: address value
4267  *  @cnt: counter variable
4268  *  Description: Check for alarm and increment the counter
4269  *  Return Value:
4270  *  1 - if alarm bit set
4271  *  0 - if alarm bit is not set
4272  */
4273 int do_s2io_chk_alarm_bit(u64 value, void __iomem * addr,
4274                           unsigned long long *cnt)
4275 {
4276         u64 val64;
4277         val64 = readq(addr);
4278         if ( val64 & value ) {
4279                 writeq(val64, addr);
4280                 (*cnt)++;
4281                 return 1;
4282         }
4283         return 0;
4284
4285 }
4286
4287 /**
4288  *  s2io_handle_errors - Xframe error indication handler
4289  *  @nic: device private variable
4290  *  Description: Handle alarms such as loss of link, single or
4291  *  double ECC errors, critical and serious errors.
4292  *  Return Value:
4293  *  NONE
4294  */
4295 static void s2io_handle_errors(void * dev_id)
4296 {
4297         struct net_device *dev = (struct net_device *) dev_id;
4298         struct s2io_nic *sp = dev->priv;
4299         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4300         u64 temp64 = 0,val64=0;
4301         int i = 0;
4302
4303         struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4304         struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4305
4306         if (!is_s2io_card_up(sp))
4307                 return;
4308
4309         if (pci_channel_offline(sp->pdev))
4310                 return;
4311
4312         memset(&sw_stat->ring_full_cnt, 0,
4313                 sizeof(sw_stat->ring_full_cnt));
4314
4315         /* Handling the XPAK counters update */
4316         if(stats->xpak_timer_count < 72000) {
4317                 /* waiting for an hour */
4318                 stats->xpak_timer_count++;
4319         } else {
4320                 s2io_updt_xpak_counter(dev);
4321                 /* reset the count to zero */
4322                 stats->xpak_timer_count = 0;
4323         }
4324
4325         /* Handling link status change error Intr */
4326         if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4327                 val64 = readq(&bar0->mac_rmac_err_reg);
4328                 writeq(val64, &bar0->mac_rmac_err_reg);
4329                 if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4330                         schedule_work(&sp->set_link_task);
4331         }
4332
4333         /* In case of a serious error, the device will be Reset. */
4334         if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
4335                                 &sw_stat->serious_err_cnt))
4336                 goto reset;
4337
4338         /* Check for data parity error */
4339         if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
4340                                 &sw_stat->parity_err_cnt))
4341                 goto reset;
4342
4343         /* Check for ring full counter */
4344         if (sp->device_type == XFRAME_II_DEVICE) {
4345                 val64 = readq(&bar0->ring_bump_counter1);
4346                 for (i=0; i<4; i++) {
4347                         temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
4348                         temp64 >>= 64 - ((i+1)*16);
4349                         sw_stat->ring_full_cnt[i] += temp64;
4350                 }
4351
4352                 val64 = readq(&bar0->ring_bump_counter2);
4353                 for (i=0; i<4; i++) {
4354                         temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
4355                         temp64 >>= 64 - ((i+1)*16);
4356                          sw_stat->ring_full_cnt[i+4] += temp64;
4357                 }
4358         }
4359
4360         val64 = readq(&bar0->txdma_int_status);
4361         /*check for pfc_err*/
4362         if (val64 & TXDMA_PFC_INT) {
4363                 if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM|
4364                                 PFC_MISC_0_ERR | PFC_MISC_1_ERR|
4365                                 PFC_PCIX_ERR, &bar0->pfc_err_reg,
4366                                 &sw_stat->pfc_err_cnt))
4367                         goto reset;
4368                 do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR, &bar0->pfc_err_reg,
4369                                 &sw_stat->pfc_err_cnt);
4370         }
4371
4372         /*check for tda_err*/
4373         if (val64 & TXDMA_TDA_INT) {
4374                 if(do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
4375                                 TDA_SM1_ERR_ALARM, &bar0->tda_err_reg,
4376                                 &sw_stat->tda_err_cnt))
4377                         goto reset;
4378                 do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
4379                                 &bar0->tda_err_reg, &sw_stat->tda_err_cnt);
4380         }
4381         /*check for pcc_err*/
4382         if (val64 & TXDMA_PCC_INT) {
4383                 if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM
4384                                 | PCC_N_SERR | PCC_6_COF_OV_ERR
4385                                 | PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR
4386                                 | PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR
4387                                 | PCC_TXB_ECC_DB_ERR, &bar0->pcc_err_reg,
4388                                 &sw_stat->pcc_err_cnt))
4389                         goto reset;
4390                 do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
4391                                 &bar0->pcc_err_reg, &sw_stat->pcc_err_cnt);
4392         }
4393
4394         /*check for tti_err*/
4395         if (val64 & TXDMA_TTI_INT) {
4396                 if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM, &bar0->tti_err_reg,
4397                                 &sw_stat->tti_err_cnt))
4398                         goto reset;
4399                 do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
4400                                 &bar0->tti_err_reg, &sw_stat->tti_err_cnt);
4401         }
4402
4403         /*check for lso_err*/
4404         if (val64 & TXDMA_LSO_INT) {
4405                 if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT
4406                                 | LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4407                                 &bar0->lso_err_reg, &sw_stat->lso_err_cnt))
4408                         goto reset;
4409                 do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
4410                                 &bar0->lso_err_reg, &sw_stat->lso_err_cnt);
4411         }
4412
4413         /*check for tpa_err*/
4414         if (val64 & TXDMA_TPA_INT) {
4415                 if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM, &bar0->tpa_err_reg,
4416                         &sw_stat->tpa_err_cnt))
4417                         goto reset;
4418                 do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP, &bar0->tpa_err_reg,
4419                         &sw_stat->tpa_err_cnt);
4420         }
4421
4422         /*check for sm_err*/
4423         if (val64 & TXDMA_SM_INT) {
4424                 if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM, &bar0->sm_err_reg,
4425                         &sw_stat->sm_err_cnt))
4426                         goto reset;
4427         }
4428
4429         val64 = readq(&bar0->mac_int_status);
4430         if (val64 & MAC_INT_STATUS_TMAC_INT) {
4431                 if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
4432                                 &bar0->mac_tmac_err_reg,
4433                                 &sw_stat->mac_tmac_err_cnt))
4434                         goto reset;
4435                 do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR
4436                                 | TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
4437                                 &bar0->mac_tmac_err_reg,
4438                                 &sw_stat->mac_tmac_err_cnt);
4439         }
4440
4441         val64 = readq(&bar0->xgxs_int_status);
4442         if (val64 & XGXS_INT_STATUS_TXGXS) {
4443                 if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
4444                                 &bar0->xgxs_txgxs_err_reg,
4445                                 &sw_stat->xgxs_txgxs_err_cnt))
4446                         goto reset;
4447                 do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
4448                                 &bar0->xgxs_txgxs_err_reg,
4449                                 &sw_stat->xgxs_txgxs_err_cnt);
4450         }
4451
4452         val64 = readq(&bar0->rxdma_int_status);
4453         if (val64 & RXDMA_INT_RC_INT_M) {
4454                 if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR
4455                                 | RC_PRCn_SM_ERR_ALARM |RC_FTC_SM_ERR_ALARM,
4456                                 &bar0->rc_err_reg, &sw_stat->rc_err_cnt))
4457                         goto reset;
4458                 do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR
4459                                 | RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4460                                 &sw_stat->rc_err_cnt);
4461                 if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn
4462                                 | PRC_PCI_AB_F_WR_Rn, &bar0->prc_pcix_err_reg,
4463                                 &sw_stat->prc_pcix_err_cnt))
4464                         goto reset;
4465                 do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn | PRC_PCI_DP_WR_Rn
4466                                 | PRC_PCI_DP_F_WR_Rn, &bar0->prc_pcix_err_reg,
4467                                 &sw_stat->prc_pcix_err_cnt);
4468         }
4469
4470         if (val64 & RXDMA_INT_RPA_INT_M) {
4471                 if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
4472                                 &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt))
4473                         goto reset;
4474                 do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
4475                                 &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt);
4476         }
4477
4478         if (val64 & RXDMA_INT_RDA_INT_M) {
4479                 if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR
4480                                 | RDA_FRM_ECC_DB_N_AERR | RDA_SM1_ERR_ALARM
4481                                 | RDA_SM0_ERR_ALARM | RDA_RXD_ECC_DB_SERR,
4482                                 &bar0->rda_err_reg, &sw_stat->rda_err_cnt))
4483                         goto reset;
4484                 do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR | RDA_FRM_ECC_SG_ERR
4485                                 | RDA_MISC_ERR | RDA_PCIX_ERR,
4486                                 &bar0->rda_err_reg, &sw_stat->rda_err_cnt);
4487         }
4488
4489         if (val64 & RXDMA_INT_RTI_INT_M) {
4490                 if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM, &bar0->rti_err_reg,
4491                                 &sw_stat->rti_err_cnt))
4492                         goto reset;
4493                 do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
4494                                 &bar0->rti_err_reg, &sw_stat->rti_err_cnt);
4495         }
4496
4497         val64 = readq(&bar0->mac_int_status);
4498         if (val64 & MAC_INT_STATUS_RMAC_INT) {
4499                 if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
4500                                 &bar0->mac_rmac_err_reg,
4501                                 &sw_stat->mac_rmac_err_cnt))
4502                         goto reset;
4503                 do_s2io_chk_alarm_bit(RMAC_UNUSED_INT|RMAC_SINGLE_ECC_ERR|
4504                                 RMAC_DOUBLE_ECC_ERR, &bar0->mac_rmac_err_reg,
4505                                 &sw_stat->mac_rmac_err_cnt);
4506         }
4507
4508         val64 = readq(&bar0->xgxs_int_status);
4509         if (val64 & XGXS_INT_STATUS_RXGXS) {
4510                 if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
4511                                 &bar0->xgxs_rxgxs_err_reg,
4512                                 &sw_stat->xgxs_rxgxs_err_cnt))
4513                         goto reset;
4514         }
4515
4516         val64 = readq(&bar0->mc_int_status);
4517         if(val64 & MC_INT_STATUS_MC_INT) {
4518                 if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR, &bar0->mc_err_reg,
4519                                 &sw_stat->mc_err_cnt))
4520                         goto reset;
4521
4522                 /* Handling Ecc errors */
4523                 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
4524                         writeq(val64, &bar0->mc_err_reg);
4525                         if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4526                                 sw_stat->double_ecc_errs++;
4527                                 if (sp->device_type != XFRAME_II_DEVICE) {
4528                                         /*
4529                                          * Reset XframeI only if critical error
4530                                          */
4531                                         if (val64 &
4532                                                 (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4533                                                 MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4534                                                                 goto reset;
4535                                         }
4536                         } else
4537                                 sw_stat->single_ecc_errs++;
4538                 }
4539         }
4540         return;
4541
4542 reset:
4543         netif_stop_queue(dev);
4544         schedule_work(&sp->rst_timer_task);
4545         sw_stat->soft_reset_cnt++;
4546         return;
4547 }
4548
4549 /**
4550  *  s2io_isr - ISR handler of the device .
4551  *  @irq: the irq of the device.
4552  *  @dev_id: a void pointer to the dev structure of the NIC.
4553  *  Description:  This function is the ISR handler of the device. It
4554  *  identifies the reason for the interrupt and calls the relevant
4555  *  service routines. As a contingency measure, this ISR allocates the
4556  *  recv buffers, if their numbers are below the panic value which is
4557  *  presently set to 25% of the original number of rcv buffers allocated.
4558  *  Return value:
4559  *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
4560  *   IRQ_NONE: will be returned if interrupt is not from our device
4561  */
static irqreturn_t s2io_isr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct s2io_nic *sp = dev->priv;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	int i;
	u64 reason = 0;
	struct mac_info *mac_control;
	struct config_param *config;

	/* Pretend we handled any irq's from a disconnected card */
	if (pci_channel_offline(sp->pdev))
		return IRQ_NONE;

	/* Card is down (reset/close in progress); nothing to service */
	if (!is_s2io_card_up(sp))
		return IRQ_NONE;

	mac_control = &sp->mac_control;
	config = &sp->config;

	/*
	 * Identify the cause for interrupt and call the appropriate
	 * interrupt handler. Causes for the interrupt could be;
	 * 1. Rx of packet.
	 * 2. Tx complete.
	 * 3. Link down.
	 */
	reason = readq(&bar0->general_int_status);

	if (unlikely(reason == S2IO_MINUS_ONE) ) {
		/*
		 * All 1's read back -- nothing much can be done
		 * (presumably the adapter has gone away). Get out.
		 */
		return IRQ_HANDLED;
	}

	if (reason & (GEN_INTR_RXTRAFFIC |
		GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC))
	{
		/* Mask all interrupts while they are being serviced */
		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);

		if (config->napi) {
			if (reason & GEN_INTR_RXTRAFFIC) {
				/*
				 * Defer Rx processing to the NAPI poll
				 * routine; if it was already scheduled, just
				 * ack the Rx interrupt instead.
				 */
				if (likely(netif_rx_schedule_prep(dev,
							&sp->napi))) {
					__netif_rx_schedule(dev, &sp->napi);
					writeq(S2IO_MINUS_ONE,
					       &bar0->rx_traffic_mask);
				} else
					writeq(S2IO_MINUS_ONE,
					       &bar0->rx_traffic_int);
			}
		} else {
			/*
			 * rx_traffic_int reg is an R1 register, writing all 1's
			 * will ensure that the actual interrupt causing bit
			 * get's cleared and hence a read can be avoided.
			 */
			if (reason & GEN_INTR_RXTRAFFIC)
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);

			for (i = 0; i < config->rx_ring_num; i++)
				rx_intr_handler(&mac_control->rings[i]);
		}

		/*
		 * tx_traffic_int reg is an R1 register, writing all 1's
		 * will ensure that the actual interrupt causing bit get's
		 * cleared and hence a read can be avoided.
		 */
		if (reason & GEN_INTR_TXTRAFFIC)
			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);

		for (i = 0; i < config->tx_fifo_num; i++)
			tx_intr_handler(&mac_control->fifos[i]);

		if (reason & GEN_INTR_TXPIC)
			s2io_txpic_intr_handle(sp);

		/*
		 * Reallocate the buffers from the interrupt handler itself.
		 */
		if (!config->napi) {
			for (i = 0; i < config->rx_ring_num; i++)
				s2io_chk_rx_buffers(sp, i);
		}
		/* Restore the interrupt mask; the readl flushes the write */
		writeq(sp->general_int_mask, &bar0->general_int_mask);
		readl(&bar0->general_int_status);

		return IRQ_HANDLED;

	}
	else if (!reason) {
		/* The interrupt was not raised by us */
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}
4659
4660 /**
4661  * s2io_updt_stats -
4662  */
4663 static void s2io_updt_stats(struct s2io_nic *sp)
4664 {
4665         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4666         u64 val64;
4667         int cnt = 0;
4668
4669         if (is_s2io_card_up(sp)) {
4670                 /* Apprx 30us on a 133 MHz bus */
4671                 val64 = SET_UPDT_CLICKS(10) |
4672                         STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4673                 writeq(val64, &bar0->stat_cfg);
4674                 do {
4675                         udelay(100);
4676                         val64 = readq(&bar0->stat_cfg);
4677                         if (!(val64 & BIT(0)))
4678                                 break;
4679                         cnt++;
4680                         if (cnt == 5)
4681                                 break; /* Updt failed */
4682                 } while(1);
4683         } 
4684 }
4685
4686 /**
4687  *  s2io_get_stats - Updates the device statistics structure.
4688  *  @dev : pointer to the device structure.
4689  *  Description:
4690  *  This function updates the device statistics structure in the s2io_nic
4691  *  structure and returns a pointer to the same.
4692  *  Return value:
4693  *  pointer to the updated net_device_stats structure.
4694  */
4695
4696 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4697 {
4698         struct s2io_nic *sp = dev->priv;
4699         struct mac_info *mac_control;
4700         struct config_param *config;
4701
4702
4703         mac_control = &sp->mac_control;
4704         config = &sp->config;
4705
4706         /* Configure Stats for immediate updt */
4707         s2io_updt_stats(sp);
4708
4709         sp->stats.tx_packets =
4710                 le32_to_cpu(mac_control->stats_info->tmac_frms);
4711         sp->stats.tx_errors =
4712                 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4713         sp->stats.rx_errors =
4714                 le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
4715         sp->stats.multicast =
4716                 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
4717         sp->stats.rx_length_errors =
4718                 le64_to_cpu(mac_control->stats_info->rmac_long_frms);
4719
4720         return (&sp->stats);
4721 }
4722
4723 /**
4724  *  s2io_set_multicast - entry point for multicast address enable/disable.
4725  *  @dev : pointer to the device structure
4726  *  Description:
4727  *  This function is a driver entry point which gets called by the kernel
4728  *  whenever multicast addresses must be enabled/disabled. This also gets
4729  *  called to set/reset promiscuous mode. Depending on the device flag, we
4730  *  determine, if multicast address must be enabled or if promiscuous mode
4731  *  is to be disabled etc.
4732  *  Return value:
4733  *  void.
4734  */
4735
static void s2io_set_multicast(struct net_device *dev)
{
	int i, j, prev_cnt;
	struct dev_mc_list *mclist;
	struct s2io_nic *sp = dev->priv;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
	    0xfeffffffffffULL;
	u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
	void __iomem *add;

	if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
		/*  Enable all Multicast addresses */
		writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
		       &bar0->rmac_addr_data0_mem);
		writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
		       &bar0->rmac_addr_data1_mem);
		val64 = RMAC_ADDR_CMD_MEM_WE |
		    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
		    RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
		writeq(val64, &bar0->rmac_addr_cmd_mem);
		/* Wait till command completes */
		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
					RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
					S2IO_BIT_RESET);

		sp->m_cast_flg = 1;
		sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
	} else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
		/*
		 * NOTE(review): this branch runs while IFF_ALLMULTI is still
		 * set, so consecutive calls toggle all-multi on and off --
		 * verify the intended condition isn't
		 * !(dev->flags & IFF_ALLMULTI).
		 */
		/*  Disable all Multicast addresses */
		writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
		       &bar0->rmac_addr_data0_mem);
		writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
		       &bar0->rmac_addr_data1_mem);
		val64 = RMAC_ADDR_CMD_MEM_WE |
		    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
		    RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
		writeq(val64, &bar0->rmac_addr_cmd_mem);
		/* Wait till command completes */
		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
					RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
					S2IO_BIT_RESET);

		sp->m_cast_flg = 0;
		sp->all_multi_pos = 0;
	}

	if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
		/*  Put the NIC into promiscuous mode */
		add = &bar0->mac_cfg;
		val64 = readq(&bar0->mac_cfg);
		val64 |= MAC_CFG_RMAC_PROM_ENABLE;

		/*
		 * mac_cfg is key-protected: each 32-bit half must be written
		 * immediately after unlocking with RMAC_CFG_KEY.
		 */
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) val64, add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));

		/* Promiscuous mode delivers frames untouched, so stop
		 * stripping VLAN tags unless the module param forces it */
		if (vlan_tag_strip != 1) {
			val64 = readq(&bar0->rx_pa_cfg);
			val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
			writeq(val64, &bar0->rx_pa_cfg);
			vlan_strip_flag = 0;
		}

		val64 = readq(&bar0->mac_cfg);
		sp->promisc_flg = 1;
		DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
			  dev->name);
	} else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
		/*  Remove the NIC from promiscuous mode */
		add = &bar0->mac_cfg;
		val64 = readq(&bar0->mac_cfg);
		val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;

		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) val64, add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));

		/* Restore VLAN tag stripping unless disabled by module param */
		if (vlan_tag_strip != 0) {
			val64 = readq(&bar0->rx_pa_cfg);
			val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
			writeq(val64, &bar0->rx_pa_cfg);
			vlan_strip_flag = 1;
		}

		val64 = readq(&bar0->mac_cfg);
		sp->promisc_flg = 0;
		DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
			  dev->name);
	}

	/*  Update individual M_CAST address list */
	if ((!sp->m_cast_flg) && dev->mc_count) {
		if (dev->mc_count >
		    (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
			DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
				  dev->name);
			DBG_PRINT(ERR_DBG, "can be added, please enable ");
			DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
			return;
		}

		prev_cnt = sp->mc_addr_count;
		sp->mc_addr_count = dev->mc_count;

		/* Clear out the previous list of Mc in the H/W. */
		for (i = 0; i < prev_cnt; i++) {
			/* Write the broadcast/disable pattern over each
			 * previously programmed filter slot */
			writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
			       &bar0->rmac_addr_data0_mem);
			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
				&bar0->rmac_addr_data1_mem);
			val64 = RMAC_ADDR_CMD_MEM_WE |
			    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
			    RMAC_ADDR_CMD_MEM_OFFSET
			    (MAC_MC_ADDR_START_OFFSET + i);
			writeq(val64, &bar0->rmac_addr_cmd_mem);

			/* Wait for command completes */
			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
					RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
					S2IO_BIT_RESET)) {
				DBG_PRINT(ERR_DBG, "%s: Adding ",
					  dev->name);
				DBG_PRINT(ERR_DBG, "Multicasts failed\n");
				return;
			}
		}

		/* Create the new Rx filter list and update the same in H/W. */
		for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
		     i++, mclist = mclist->next) {
			memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
			       ETH_ALEN);
			mac_addr = 0;
			/*
			 * Pack the 6 address bytes into a 48-bit big-endian
			 * value; the final right shift undoes the extra
			 * shift performed on the last iteration.
			 */
			for (j = 0; j < ETH_ALEN; j++) {
				mac_addr |= mclist->dmi_addr[j];
				mac_addr <<= 8;
			}
			mac_addr >>= 8;
			writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
			       &bar0->rmac_addr_data0_mem);
			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
				&bar0->rmac_addr_data1_mem);
			val64 = RMAC_ADDR_CMD_MEM_WE |
			    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
			    RMAC_ADDR_CMD_MEM_OFFSET
			    (i + MAC_MC_ADDR_START_OFFSET);
			writeq(val64, &bar0->rmac_addr_cmd_mem);

			/* Wait for command completes */
			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
					RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
					S2IO_BIT_RESET)) {
				DBG_PRINT(ERR_DBG, "%s: Adding ",
					  dev->name);
				DBG_PRINT(ERR_DBG, "Multicasts failed\n");
				return;
			}
		}
	}
}
4899
4900 /**
4901  *  s2io_set_mac_addr - Programs the Xframe mac address
4902  *  @dev : pointer to the device structure.
4903  *  @addr: a uchar pointer to the new mac address which is to be set.
4904  *  Description : This procedure will program the Xframe to receive
4905  *  frames with new Mac Address
4906  *  Return value: SUCCESS on success and an appropriate (-)ve integer
4907  *  as defined in errno.h file on failure.
4908  */
4909
4910 static int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
4911 {
4912         struct s2io_nic *sp = dev->priv;
4913         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4914         register u64 val64, mac_addr = 0;
4915         int i;
4916         u64 old_mac_addr = 0;
4917
4918         /*
4919          * Set the new MAC address as the new unicast filter and reflect this
4920          * change on the device address registered with the OS. It will be
4921          * at offset 0.
4922          */
4923         for (i = 0; i < ETH_ALEN; i++) {
4924                 mac_addr <<= 8;
4925                 mac_addr |= addr[i];
4926                 old_mac_addr <<= 8;
4927                 old_mac_addr |= sp->def_mac_addr[0].mac_addr[i];
4928         }
4929
4930         if(0 == mac_addr)
4931                 return SUCCESS;
4932
4933         /* Update the internal structure with this new mac address */
4934         if(mac_addr != old_mac_addr) {
4935                 memset(sp->def_mac_addr[0].mac_addr, 0, sizeof(ETH_ALEN));
4936                 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_addr);
4937                 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_addr >> 8);
4938                 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_addr >> 16);
4939                 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_addr >> 24);
4940                 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_addr >> 32);
4941                 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_addr >> 40);
4942         }
4943
4944         writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4945                &bar0->rmac_addr_data0_mem);
4946
4947         val64 =
4948             RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4949             RMAC_ADDR_CMD_MEM_OFFSET(0);
4950         writeq(val64, &bar0->rmac_addr_cmd_mem);
4951         /* Wait till command completes */
4952         if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4953                       RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET)) {
4954                 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
4955                 return FAILURE;
4956         }
4957
4958         return SUCCESS;
4959 }
4960
4961 /**
4962  * s2io_ethtool_sset - Sets different link parameters.
4963  * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
4964  * @info: pointer to the structure with parameters given by ethtool to set
4965  * link information.
4966  * Description:
4967  * The function sets different link parameters provided by the user onto
4968  * the NIC.
4969  * Return value:
4970  * 0 on success.
4971 */
4972
4973 static int s2io_ethtool_sset(struct net_device *dev,
4974                              struct ethtool_cmd *info)
4975 {
4976         struct s2io_nic *sp = dev->priv;
4977         if ((info->autoneg == AUTONEG_ENABLE) ||
4978             (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
4979                 return -EINVAL;
4980         else {
4981                 s2io_close(sp->dev);
4982                 s2io_open(sp->dev);
4983         }
4984
4985         return 0;
4986 }
4987
4988 /**
4989  * s2io_ethtool_gset - Return link specific information.
4990  * @sp : private member of the device structure, pointer to the
4991  *      s2io_nic structure.
4992  * @info : pointer to the structure with parameters given by ethtool
4993  * to return link information.
4994  * Description:
4995  * Returns link specific information like speed, duplex etc.. to ethtool.
4996  * Return value :
4997  * return 0 on success.
4998  */
4999
5000 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
5001 {
5002         struct s2io_nic *sp = dev->priv;
5003         info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5004         info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5005         info->port = PORT_FIBRE;
5006         /* info->transceiver?? TODO */
5007
5008         if (netif_carrier_ok(sp->dev)) {
5009                 info->speed = 10000;
5010                 info->duplex = DUPLEX_FULL;
5011         } else {
5012                 info->speed = -1;
5013                 info->duplex = -1;
5014         }
5015
5016         info->autoneg = AUTONEG_DISABLE;
5017         return 0;
5018 }
5019
5020 /**
5021  * s2io_ethtool_gdrvinfo - Returns driver specific information.
5022  * @sp : private member of the device structure, which is a pointer to the
5023  * s2io_nic structure.
5024  * @info : pointer to the structure with parameters given by ethtool to
5025  * return driver information.
5026  * Description:
5027  * Returns driver specific information like name, version etc. to ethtool.
5028  * Return value:
5029  *  void
5030  */
5031
5032 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5033                                   struct ethtool_drvinfo *info)
5034 {
5035         struct s2io_nic *sp = dev->priv;
5036
5037         strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
5038         strncpy(info->version, s2io_driver_version, sizeof(info->version));
5039         strncpy(info->fw_version, "", sizeof(info->fw_version));
5040         strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
5041         info->regdump_len = XENA_REG_SPACE;
5042         info->eedump_len = XENA_EEPROM_SPACE;
5043         info->testinfo_len = S2IO_TEST_LEN;
5044
5045         if (sp->device_type == XFRAME_I_DEVICE)
5046                 info->n_stats = XFRAME_I_STAT_LEN;
5047         else
5048                 info->n_stats = XFRAME_II_STAT_LEN;
5049 }
5050
5051 /**
 *  s2io_ethtool_gregs - dumps the entire space of Xframe into the buffer.
5053  *  @sp: private member of the device structure, which is a pointer to the
5054  *  s2io_nic structure.
5055  *  @regs : pointer to the structure with parameters given by ethtool for
5056  *  dumping the registers.
 *  @reg_space: The input argument into which all the registers are dumped.
5058  *  Description:
5059  *  Dumps the entire register space of xFrame NIC into the user given
5060  *  buffer area.
5061  * Return value :
5062  * void .
5063 */
5064
5065 static void s2io_ethtool_gregs(struct net_device *dev,
5066                                struct ethtool_regs *regs, void *space)
5067 {
5068         int i;
5069         u64 reg;
5070         u8 *reg_space = (u8 *) space;
5071         struct s2io_nic *sp = dev->priv;
5072
5073         regs->len = XENA_REG_SPACE;
5074         regs->version = sp->pdev->subsystem_device;
5075
5076         for (i = 0; i < regs->len; i += 8) {
5077                 reg = readq(sp->bar0 + i);
5078                 memcpy((reg_space + i), &reg, 8);
5079         }
5080 }
5081
5082 /**
5083  *  s2io_phy_id  - timer function that alternates adapter LED.
5084  *  @data : address of the private member of the device structure, which
5085  *  is a pointer to the s2io_nic structure, provided as an u32.
5086  * Description: This is actually the timer function that alternates the
5087  * adapter LED bit of the adapter control bit to set/reset every time on
 * invocation. The timer is set for 1/2 a second, hence the NIC blinks
5089  *  once every second.
5090 */
5091 static void s2io_phy_id(unsigned long data)
5092 {
5093         struct s2io_nic *sp = (struct s2io_nic *) data;
5094         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5095         u64 val64 = 0;
5096         u16 subid;
5097
5098         subid = sp->pdev->subsystem_device;
5099         if ((sp->device_type == XFRAME_II_DEVICE) ||
5100                    ((subid & 0xFF) >= 0x07)) {
5101                 val64 = readq(&bar0->gpio_control);
5102                 val64 ^= GPIO_CTRL_GPIO_0;
5103                 writeq(val64, &bar0->gpio_control);
5104         } else {
5105                 val64 = readq(&bar0->adapter_control);
5106                 val64 ^= ADAPTER_LED_ON;
5107                 writeq(val64, &bar0->adapter_control);
5108         }
5109
5110         mod_timer(&sp->id_timer, jiffies + HZ / 2);
5111 }
5112
5113 /**
5114  * s2io_ethtool_idnic - To physically identify the nic on the system.
5115  * @sp : private member of the device structure, which is a pointer to the
5116  * s2io_nic structure.
5117  * @id : pointer to the structure with identification parameters given by
5118  * ethtool.
5119  * Description: Used to physically identify the NIC on the system.
5120  * The Link LED will blink for a time specified by the user for
5121  * identification.
5122  * NOTE: The Link has to be Up to be able to blink the LED. Hence
5123  * identification is possible only if it's link is up.
5124  * Return value:
5125  * int , returns 0 on success
5126  */
5127
5128 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
5129 {
5130         u64 val64 = 0, last_gpio_ctrl_val;
5131         struct s2io_nic *sp = dev->priv;
5132         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5133         u16 subid;
5134
5135         subid = sp->pdev->subsystem_device;
5136         last_gpio_ctrl_val = readq(&bar0->gpio_control);
5137         if ((sp->device_type == XFRAME_I_DEVICE) &&
5138                 ((subid & 0xFF) < 0x07)) {
5139                 val64 = readq(&bar0->adapter_control);
5140                 if (!(val64 & ADAPTER_CNTL_EN)) {
5141                         printk(KERN_ERR
5142                                "Adapter Link down, cannot blink LED\n");
5143                         return -EFAULT;
5144                 }
5145         }
5146         if (sp->id_timer.function == NULL) {
5147                 init_timer(&sp->id_timer);
5148                 sp->id_timer.function = s2io_phy_id;
5149                 sp->id_timer.data = (unsigned long) sp;
5150         }
5151         mod_timer(&sp->id_timer, jiffies);
5152         if (data)
5153                 msleep_interruptible(data * HZ);
5154         else
5155                 msleep_interruptible(MAX_FLICKER_TIME);
5156         del_timer_sync(&sp->id_timer);
5157
5158         if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
5159                 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
5160                 last_gpio_ctrl_val = readq(&bar0->gpio_control);
5161         }
5162
5163         return 0;
5164 }
5165
5166 static void s2io_ethtool_gringparam(struct net_device *dev,
5167                                     struct ethtool_ringparam *ering)
5168 {
5169         struct s2io_nic *sp = dev->priv;
5170         int i,tx_desc_count=0,rx_desc_count=0;
5171
5172         if (sp->rxd_mode == RXD_MODE_1)
5173                 ering->rx_max_pending = MAX_RX_DESC_1;
5174         else if (sp->rxd_mode == RXD_MODE_3B)
5175                 ering->rx_max_pending = MAX_RX_DESC_2;
5176
5177         ering->tx_max_pending = MAX_TX_DESC;
5178         for (i = 0 ; i < sp->config.tx_fifo_num ; i++) 
5179                 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5180         
5181         DBG_PRINT(INFO_DBG,"\nmax txds : %d\n",sp->config.max_txds);
5182         ering->tx_pending = tx_desc_count;
5183         rx_desc_count = 0;
5184         for (i = 0 ; i < sp->config.rx_ring_num ; i++) 
5185                 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5186
5187         ering->rx_pending = rx_desc_count;
5188
5189         ering->rx_mini_max_pending = 0;
5190         ering->rx_mini_pending = 0;
5191         if(sp->rxd_mode == RXD_MODE_1)
5192                 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5193         else if (sp->rxd_mode == RXD_MODE_3B)
5194                 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5195         ering->rx_jumbo_pending = rx_desc_count;
5196 }
5197
5198 /**
5199  * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
5200  * @sp : private member of the device structure, which is a pointer to the
5201  *      s2io_nic structure.
5202  * @ep : pointer to the structure with pause parameters given by ethtool.
5203  * Description:
5204  * Returns the Pause frame generation and reception capability of the NIC.
5205  * Return value:
5206  *  void
5207  */
5208 static void s2io_ethtool_getpause_data(struct net_device *dev,
5209                                        struct ethtool_pauseparam *ep)
5210 {
5211         u64 val64;
5212         struct s2io_nic *sp = dev->priv;
5213         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5214
5215         val64 = readq(&bar0->rmac_pause_cfg);
5216         if (val64 & RMAC_PAUSE_GEN_ENABLE)
5217                 ep->tx_pause = TRUE;
5218         if (val64 & RMAC_PAUSE_RX_ENABLE)
5219                 ep->rx_pause = TRUE;
5220         ep->autoneg = FALSE;
5221 }
5222
5223 /**
5224  * s2io_ethtool_setpause_data -  set/reset pause frame generation.
5225  * @sp : private member of the device structure, which is a pointer to the
5226  *      s2io_nic structure.
5227  * @ep : pointer to the structure with pause parameters given by ethtool.
5228  * Description:
5229  * It can be used to set or reset Pause frame generation or reception
5230  * support of the NIC.
5231  * Return value:
5232  * int, returns 0 on Success
5233  */
5234
5235 static int s2io_ethtool_setpause_data(struct net_device *dev,
5236                                struct ethtool_pauseparam *ep)
5237 {
5238         u64 val64;
5239         struct s2io_nic *sp = dev->priv;
5240         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5241
5242         val64 = readq(&bar0->rmac_pause_cfg);
5243         if (ep->tx_pause)
5244                 val64 |= RMAC_PAUSE_GEN_ENABLE;
5245         else
5246                 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5247         if (ep->rx_pause)
5248                 val64 |= RMAC_PAUSE_RX_ENABLE;
5249         else
5250                 val64 &= ~RMAC_PAUSE_RX_ENABLE;
5251         writeq(val64, &bar0->rmac_pause_cfg);
5252         return 0;
5253 }
5254
5255 /**
5256  * read_eeprom - reads 4 bytes of data from user given offset.
5257  * @sp : private member of the device structure, which is a pointer to the
5258  *      s2io_nic structure.
5259  * @off : offset at which the data must be written
5260  * @data : Its an output parameter where the data read at the given
5261  *      offset is stored.
5262  * Description:
5263  * Will read 4 bytes of data from the user given offset and return the
5264  * read data.
5265  * NOTE: Will allow to read only part of the EEPROM visible through the
5266  *   I2C bus.
5267  * Return value:
5268  *  -1 on failure and 0 on success.
5269  */
5270
#define S2IO_DEV_ID             5
static int read_eeprom(struct s2io_nic * sp, int off, u64 * data)
{
	int ret = -1;
	u32 exit_cnt = 0;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* Xframe I: the EEPROM sits behind an I2C controller.  Start a
	 * 4-byte read and poll for completion, up to 5 tries x 50 ms. */
	if (sp->device_type == XFRAME_I_DEVICE) {
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
		    I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
		    I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				*data = I2C_CONTROL_GET_DATA(val64);
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	/* Xframe II: the EEPROM is on a SPI controller instead.  Program
	 * the command word, then assert REQ in a second write, and poll
	 * for either NACK (failure) or DONE (data ready). */
	if (sp->device_type == XFRAME_II_DEVICE) {
		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(0x3) |
			SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				/* NOTE(review): NACK returns 1 (not -1), but
				 * callers only test for non-zero, so this is
				 * still treated as failure. */
				ret = 1;
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				*data = readq(&bar0->spi_data);
				/* Only the low 3 bytes of spi_data are valid. */
				*data &= 0xffffff;
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
5321
5322 /**
5323  *  write_eeprom - actually writes the relevant part of the data value.
5324  *  @sp : private member of the device structure, which is a pointer to the
5325  *       s2io_nic structure.
5326  *  @off : offset at which the data must be written
5327  *  @data : The data that is to be written
5328  *  @cnt : Number of bytes of the data that are actually to be written into
5329  *  the Eeprom. (max of 3)
5330  * Description:
5331  *  Actually writes the relevant part of the data value into the Eeprom
5332  *  through the I2C bus.
5333  * Return value:
5334  *  0 on success, -1 on failure.
5335  */
5336
static int write_eeprom(struct s2io_nic * sp, int off, u64 data, int cnt)
{
	int exit_cnt = 0, ret = -1;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* Xframe I: write up to 3 bytes through the I2C controller and
	 * poll for completion; a NACK from the device means failure. */
	if (sp->device_type == XFRAME_I_DEVICE) {
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
		    I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
		    I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				if (!(val64 & I2C_CONTROL_NACK))
					ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	/* Xframe II: write goes through the SPI controller.  A byte count
	 * of 8 is encoded as 0 in the BYTECNT field; data is staged in
	 * spi_data (cnt<<3 = bit count) before the command is issued. */
	if (sp->device_type == XFRAME_II_DEVICE) {
		int write_cnt = (cnt == 8) ? 0 : cnt;
		writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);

		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(write_cnt) |
			SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				/* NOTE(review): NACK returns 1 (not -1); callers
				 * treat any non-zero value as failure. */
				ret = 1;
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
5386 static void s2io_vpd_read(struct s2io_nic *nic)
5387 {
5388         u8 *vpd_data;
5389         u8 data;
5390         int i=0, cnt, fail = 0;
5391         int vpd_addr = 0x80;
5392
5393         if (nic->device_type == XFRAME_II_DEVICE) {
5394                 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5395                 vpd_addr = 0x80;
5396         }
5397         else {
5398                 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5399                 vpd_addr = 0x50;
5400         }
5401         strcpy(nic->serial_num, "NOT AVAILABLE");
5402
5403         vpd_data = kmalloc(256, GFP_KERNEL);
5404         if (!vpd_data) {
5405                 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
5406                 return;
5407         }
5408         nic->mac_control.stats_info->sw_stat.mem_allocated += 256;
5409
5410         for (i = 0; i < 256; i +=4 ) {
5411                 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5412                 pci_read_config_byte(nic->pdev,  (vpd_addr + 2), &data);
5413                 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5414                 for (cnt = 0; cnt <5; cnt++) {
5415                         msleep(2);
5416                         pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5417                         if (data == 0x80)
5418                                 break;
5419                 }
5420                 if (cnt >= 5) {
5421                         DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5422                         fail = 1;
5423                         break;
5424                 }
5425                 pci_read_config_dword(nic->pdev,  (vpd_addr + 4),
5426                                       (u32 *)&vpd_data[i]);
5427         }
5428
5429         if(!fail) {
5430                 /* read serial number of adapter */
5431                 for (cnt = 0; cnt < 256; cnt++) {
5432                 if ((vpd_data[cnt] == 'S') &&
5433                         (vpd_data[cnt+1] == 'N') &&
5434                         (vpd_data[cnt+2] < VPD_STRING_LEN)) {
5435                                 memset(nic->serial_num, 0, VPD_STRING_LEN);
5436                                 memcpy(nic->serial_num, &vpd_data[cnt + 3],
5437                                         vpd_data[cnt+2]);
5438                                 break;
5439                         }
5440                 }
5441         }
5442
5443         if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
5444                 memset(nic->product_name, 0, vpd_data[1]);
5445                 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
5446         }
5447         kfree(vpd_data);
5448         nic->mac_control.stats_info->sw_stat.mem_freed += 256;
5449 }
5450
5451 /**
5452  *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
 *  @sp : private member of the device structure, which is a pointer to the
 *       s2io_nic structure.
5454  *  @eeprom : pointer to the user level structure provided by ethtool,
5455  *  containing all relevant information.
5456  *  @data_buf : user defined value to be written into Eeprom.
5457  *  Description: Reads the values stored in the Eeprom at given offset
5458  *  for a given length. Stores these values int the input argument data
5459  *  buffer 'data_buf' and returns these to the caller (ethtool.)
5460  *  Return value:
5461  *  int  0 on success
5462  */
5463
5464 static int s2io_ethtool_geeprom(struct net_device *dev,
5465                          struct ethtool_eeprom *eeprom, u8 * data_buf)
5466 {
5467         u32 i, valid;
5468         u64 data;
5469         struct s2io_nic *sp = dev->priv;
5470
5471         eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5472
5473         if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5474                 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5475
5476         for (i = 0; i < eeprom->len; i += 4) {
5477                 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5478                         DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5479                         return -EFAULT;
5480                 }
5481                 valid = INV(data);
5482                 memcpy((data_buf + i), &valid, 4);
5483         }
5484         return 0;
5485 }
5486
5487 /**
5488  *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5489  *  @sp : private member of the device structure, which is a pointer to the
5490