5f0fcb04afff1e3999033f8fe003a4f74e3639f6
[linux-2.6.git] / drivers / net / s2io.c
1 /************************************************************************
2  * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3  * Copyright(c) 2002-2007 Neterion Inc.
4
5  * This software may be used and distributed according to the terms of
6  * the GNU General Public License (GPL), incorporated herein by reference.
7  * Drivers based on or derived from this code fall under the GPL and must
8  * retain the authorship, copyright and license notice.  This file is not
9  * a complete program and may only be used when the entire operating
10  * system is licensed under the GPL.
11  * See the file COPYING in this distribution for more information.
12  *
13  * Credits:
14  * Jeff Garzik          : For pointing out the improper error condition
15  *                        check in the s2io_xmit routine and also some
16  *                        issues in the Tx watch dog function. Also for
17  *                        patiently answering all those innumerable
 18  *                        questions regarding the 2.6 porting issues.
19  * Stephen Hemminger    : Providing proper 2.6 porting mechanism for some
20  *                        macros available only in 2.6 Kernel.
21  * Francois Romieu      : For pointing out all code part that were
22  *                        deprecated and also styling related comments.
23  * Grant Grundler       : For helping me get rid of some Architecture
24  *                        dependent code.
25  * Christopher Hellwig  : Some more 2.6 specific issues in the driver.
26  *
27  * The module loadable parameters that are supported by the driver and a brief
 28  * explanation of all the variables.
29  *
30  * rx_ring_num : This can be used to program the number of receive rings used
31  * in the driver.
32  * rx_ring_sz: This defines the number of receive blocks each ring can have.
33  *     This is also an array of size 8.
34  * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35  *              values are 1, 2.
 36  * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
37  * tx_fifo_len: This too is an array of 8. Each element defines the number of
38  * Tx descriptors that can be associated with each corresponding FIFO.
39  * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40  *     2(MSI_X). Default value is '2(MSI_X)'
41  * lro_enable: Specifies whether to enable Large Receive Offload (LRO) or not.
42  *     Possible values '1' for enable '0' for disable. Default is '0'
43  * lro_max_pkts: This parameter defines maximum number of packets can be
44  *     aggregated as a single large packet
45  * napi: This parameter used to enable/disable NAPI (polling Rx)
46  *     Possible values '1' for enable and '0' for disable. Default is '1'
47  * ufo: This parameter used to enable/disable UDP Fragmentation Offload(UFO)
48  *      Possible values '1' for enable and '0' for disable. Default is '0'
49  * vlan_tag_strip: This can be used to enable or disable vlan stripping.
50  *                 Possible values '1' for enable , '0' for disable.
51  *                 Default is '2' - which means disable in promisc mode
52  *                 and enable in non-promiscuous mode.
53  * multiq: This parameter used to enable/disable MULTIQUEUE support.
54  *      Possible values '1' for enable and '0' for disable. Default is '0'
55  ************************************************************************/
56
57 #include <linux/module.h>
58 #include <linux/types.h>
59 #include <linux/errno.h>
60 #include <linux/ioport.h>
61 #include <linux/pci.h>
62 #include <linux/dma-mapping.h>
63 #include <linux/kernel.h>
64 #include <linux/netdevice.h>
65 #include <linux/etherdevice.h>
66 #include <linux/skbuff.h>
67 #include <linux/init.h>
68 #include <linux/delay.h>
69 #include <linux/stddef.h>
70 #include <linux/ioctl.h>
71 #include <linux/timex.h>
72 #include <linux/ethtool.h>
73 #include <linux/workqueue.h>
74 #include <linux/if_vlan.h>
75 #include <linux/ip.h>
76 #include <linux/tcp.h>
77 #include <net/tcp.h>
78
79 #include <asm/system.h>
80 #include <asm/uaccess.h>
81 #include <asm/io.h>
82 #include <asm/div64.h>
83 #include <asm/irq.h>
84
85 /* local include */
86 #include "s2io.h"
87 #include "s2io-regs.h"
88
#define DRV_VERSION "2.0.26.25"

/* S2io Driver name & version. */
static char s2io_driver_name[] = "Neterion";
static char s2io_driver_version[] = DRV_VERSION;

/* Per-rxd_mode descriptor geometry, indexed by nic->rxd_mode
 * (see e.g. rxd_count[nic->rxd_mode] in init_shared_mem):
 * rxd_size  - bytes occupied by one RxD in that mode,
 * rxd_count - RxDs that fit in one Rx block in that mode.
 */
static int rxd_size[2] = {32,48};
static int rxd_count[2] = {127,85};
97
98 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
99 {
100         int ret;
101
102         ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
103                 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
104
105         return ret;
106 }
107
/*
 * Cards with following subsystem_id have a link state indication
 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
 * macro below identifies these cards given the subsystem_id.
 * Arguments and the whole expansion are parenthesized so the macro
 * expands safely inside larger expressions (the original ternary
 * leaked precedence into the surrounding context).
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid)	\
	(((dev_type) == XFRAME_I_DEVICE) ?			\
		((((subid) >= 0x600B) && ((subid) <= 0x600D)) || \
		 (((subid) >= 0x640B) && ((subid) <= 0x640D)) ? 1 : 0) : 0)

/* Link is up when neither a remote nor a local RMAC fault is flagged. */
#define LINK_IS_UP(val64) (!((val64) & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
					ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
120
/* Returns non-zero when the CARD_UP state bit is set, i.e. the
 * adapter has been initialized and brought up.
 */
static inline int is_s2io_card_up(const struct s2io_nic * sp)
{
	return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
}
125
/* Ethtool related variables and Macros. */
/* Self-test names reported to ethtool; the "(offline)"/"(online)" tag
 * in each string notes whether the test requires the link to be down.
 */
static char s2io_gstrings[][ETH_GSTRING_LEN] = {
	"Register test\t(offline)",
	"Eeprom test\t(offline)",
	"Link test\t(online)",
	"RLDRAM test\t(offline)",
	"BIST Test\t(offline)"
};
134
/* Key strings for the Xframe hardware MAC/PCI statistics exported via
 * "ethtool -S".  NOTE(review): ordering appears to be positional with
 * respect to the code that fills in the values — confirm against the
 * stats-copy routine before reordering or inserting entries.
 */
static char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
	{"tmac_frms"},
	{"tmac_data_octets"},
	{"tmac_drop_frms"},
	{"tmac_mcst_frms"},
	{"tmac_bcst_frms"},
	{"tmac_pause_ctrl_frms"},
	{"tmac_ttl_octets"},
	{"tmac_ucst_frms"},
	{"tmac_nucst_frms"},
	{"tmac_any_err_frms"},
	{"tmac_ttl_less_fb_octets"},
	{"tmac_vld_ip_octets"},
	{"tmac_vld_ip"},
	{"tmac_drop_ip"},
	{"tmac_icmp"},
	{"tmac_rst_tcp"},
	{"tmac_tcp"},
	{"tmac_udp"},
	{"rmac_vld_frms"},
	{"rmac_data_octets"},
	{"rmac_fcs_err_frms"},
	{"rmac_drop_frms"},
	{"rmac_vld_mcst_frms"},
	{"rmac_vld_bcst_frms"},
	{"rmac_in_rng_len_err_frms"},
	{"rmac_out_rng_len_err_frms"},
	{"rmac_long_frms"},
	{"rmac_pause_ctrl_frms"},
	{"rmac_unsup_ctrl_frms"},
	{"rmac_ttl_octets"},
	{"rmac_accepted_ucst_frms"},
	{"rmac_accepted_nucst_frms"},
	{"rmac_discarded_frms"},
	{"rmac_drop_events"},
	{"rmac_ttl_less_fb_octets"},
	{"rmac_ttl_frms"},
	{"rmac_usized_frms"},
	{"rmac_osized_frms"},
	{"rmac_frag_frms"},
	{"rmac_jabber_frms"},
	{"rmac_ttl_64_frms"},
	{"rmac_ttl_65_127_frms"},
	{"rmac_ttl_128_255_frms"},
	{"rmac_ttl_256_511_frms"},
	{"rmac_ttl_512_1023_frms"},
	{"rmac_ttl_1024_1518_frms"},
	{"rmac_ip"},
	{"rmac_ip_octets"},
	{"rmac_hdr_err_ip"},
	{"rmac_drop_ip"},
	{"rmac_icmp"},
	{"rmac_tcp"},
	{"rmac_udp"},
	{"rmac_err_drp_udp"},
	{"rmac_xgmii_err_sym"},
	{"rmac_frms_q0"},
	{"rmac_frms_q1"},
	{"rmac_frms_q2"},
	{"rmac_frms_q3"},
	{"rmac_frms_q4"},
	{"rmac_frms_q5"},
	{"rmac_frms_q6"},
	{"rmac_frms_q7"},
	{"rmac_full_q0"},
	{"rmac_full_q1"},
	{"rmac_full_q2"},
	{"rmac_full_q3"},
	{"rmac_full_q4"},
	{"rmac_full_q5"},
	{"rmac_full_q6"},
	{"rmac_full_q7"},
	{"rmac_pause_cnt"},
	{"rmac_xgmii_data_err_cnt"},
	{"rmac_xgmii_ctrl_err_cnt"},
	{"rmac_accepted_ip"},
	{"rmac_err_tcp"},
	{"rd_req_cnt"},
	{"new_rd_req_cnt"},
	{"new_rd_req_rtry_cnt"},
	{"rd_rtry_cnt"},
	{"wr_rtry_rd_ack_cnt"},
	{"wr_req_cnt"},
	{"new_wr_req_cnt"},
	{"new_wr_req_rtry_cnt"},
	{"wr_rtry_cnt"},
	{"wr_disc_cnt"},
	{"rd_rtry_wr_ack_cnt"},
	{"txp_wr_cnt"},
	{"txd_rd_cnt"},
	{"txd_wr_cnt"},
	{"rxd_rd_cnt"},
	{"rxd_wr_cnt"},
	{"txf_rd_cnt"},
	{"rxf_wr_cnt"}
};
231
/* Additional hardware statistics keys available only on Xframe-II
 * (appended after the Xena keys for that device; see
 * XFRAME_II_STAT_LEN below).
 */
static char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
	{"rmac_ttl_1519_4095_frms"},
	{"rmac_ttl_4096_8191_frms"},
	{"rmac_ttl_8192_max_frms"},
	{"rmac_ttl_gt_max_frms"},
	{"rmac_osized_alt_frms"},
	{"rmac_jabber_alt_frms"},
	{"rmac_gt_max_alt_frms"},
	{"rmac_vlan_frms"},
	{"rmac_len_discard"},
	{"rmac_fcs_discard"},
	{"rmac_pf_discard"},
	{"rmac_da_discard"},
	{"rmac_red_discard"},
	{"rmac_rts_discard"},
	{"rmac_ingm_full_discard"},
	{"link_fault_cnt"}
};
250
/* Software (driver-maintained) statistics keys; the first entry is a
 * section banner that shows up in the "ethtool -S" listing.
 */
static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
	{"\n DRIVER STATISTICS"},
	{"single_bit_ecc_errs"},
	{"double_bit_ecc_errs"},
	{"parity_err_cnt"},
	{"serious_err_cnt"},
	{"soft_reset_cnt"},
	{"fifo_full_cnt"},
	{"ring_0_full_cnt"},
	{"ring_1_full_cnt"},
	{"ring_2_full_cnt"},
	{"ring_3_full_cnt"},
	{"ring_4_full_cnt"},
	{"ring_5_full_cnt"},
	{"ring_6_full_cnt"},
	{"ring_7_full_cnt"},
	{"alarm_transceiver_temp_high"},
	{"alarm_transceiver_temp_low"},
	{"alarm_laser_bias_current_high"},
	{"alarm_laser_bias_current_low"},
	{"alarm_laser_output_power_high"},
	{"alarm_laser_output_power_low"},
	{"warn_transceiver_temp_high"},
	{"warn_transceiver_temp_low"},
	{"warn_laser_bias_current_high"},
	{"warn_laser_bias_current_low"},
	{"warn_laser_output_power_high"},
	{"warn_laser_output_power_low"},
	{"lro_aggregated_pkts"},
	{"lro_flush_both_count"},
	{"lro_out_of_sequence_pkts"},
	{"lro_flush_due_to_max_pkts"},
	{"lro_avg_aggr_pkts"},
	{"mem_alloc_fail_cnt"},
	{"pci_map_fail_cnt"},
	{"watchdog_timer_cnt"},
	{"mem_allocated"},
	{"mem_freed"},
	{"link_up_cnt"},
	{"link_down_cnt"},
	{"link_up_time"},
	{"link_down_time"},
	{"tx_tcode_buf_abort_cnt"},
	{"tx_tcode_desc_abort_cnt"},
	{"tx_tcode_parity_err_cnt"},
	{"tx_tcode_link_loss_cnt"},
	{"tx_tcode_list_proc_err_cnt"},
	{"rx_tcode_parity_err_cnt"},
	{"rx_tcode_abort_cnt"},
	{"rx_tcode_parity_abort_cnt"},
	{"rx_tcode_rda_fail_cnt"},
	{"rx_tcode_unkn_prot_cnt"},
	{"rx_tcode_fcs_err_cnt"},
	{"rx_tcode_buf_size_err_cnt"},
	{"rx_tcode_rxd_corrupt_cnt"},
	{"rx_tcode_unkn_err_cnt"},
	{"tda_err_cnt"},
	{"pfc_err_cnt"},
	{"pcc_err_cnt"},
	{"tti_err_cnt"},
	{"tpa_err_cnt"},
	{"sm_err_cnt"},
	{"lso_err_cnt"},
	{"mac_tmac_err_cnt"},
	{"mac_rmac_err_cnt"},
	{"xgxs_txgxs_err_cnt"},
	{"xgxs_rxgxs_err_cnt"},
	{"rc_err_cnt"},
	{"prc_pcix_err_cnt"},
	{"rpa_err_cnt"},
	{"rda_err_cnt"},
	{"rti_err_cnt"},
	{"mc_err_cnt"}
};
325
/* Counts of the key tables above; Xframe-I exports hardware + driver
 * stats, Xframe-II additionally exports the enhanced stats.
 */
#define S2IO_XENA_STAT_LEN	ARRAY_SIZE(ethtool_xena_stats_keys)
#define S2IO_ENHANCED_STAT_LEN	ARRAY_SIZE(ethtool_enhanced_stats_keys)
#define S2IO_DRIVER_STAT_LEN	ARRAY_SIZE(ethtool_driver_stats_keys)

#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)

#define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
#define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)

#define S2IO_TEST_LEN	ARRAY_SIZE(s2io_gstrings)
/* Parenthesized: the original expansion leaked precedence, so e.g.
 * "x / S2IO_STRINGS_LEN" would have parsed as "(x / len) * size".
 */
#define S2IO_STRINGS_LEN	(S2IO_TEST_LEN * ETH_GSTRING_LEN)
338
/*
 * Initialize @timer to invoke @handle with @arg as timer data and arm
 * it to expire @exp jiffies from now.  Wrapped in do { } while (0) so
 * the macro expands as a single statement and is safe inside an
 * un-braced if/else (the original bare statement list was not).
 */
#define S2IO_TIMER_CONF(timer, handle, arg, exp)		\
	do {							\
		init_timer(&timer);				\
		timer.function = handle;			\
		timer.data = (unsigned long)arg;		\
		mod_timer(&timer, (jiffies + exp));		\
	} while (0)
344
345 /* copy mac addr to def_mac_addr array */
346 static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
347 {
348         sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
349         sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
350         sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
351         sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
352         sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
353         sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
354 }
/* Add the vlan */
/* Install @grp as the device's VLAN group.  All Tx fifo locks are
 * taken (each with its own saved IRQ flags) so no transmit path can
 * observe a half-updated vlgrp pointer, then released in reverse
 * order of acquisition.
 */
static void s2io_vlan_rx_register(struct net_device *dev,
					struct vlan_group *grp)
{
	int i;
	struct s2io_nic *nic = dev->priv;
	unsigned long flags[MAX_TX_FIFOS];
	struct mac_info *mac_control = &nic->mac_control;
	struct config_param *config = &nic->config;

	/* Quiesce every Tx fifo before touching vlgrp. */
	for (i = 0; i < config->tx_fifo_num; i++)
		spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags[i]);

	nic->vlgrp = grp;
	/* Unlock in reverse order, restoring each fifo's saved flags. */
	for (i = config->tx_fifo_num - 1; i >= 0; i--)
		spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock,
				flags[i]);
}
373
/* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not.
 * Non-zero means the adapter is currently configured to strip VLAN
 * tags on receive.
 */
static int vlan_strip_flag;
376
/* Unregister the vlan */
/* Remove VLAN id @vid from the device's VLAN group.  Mirrors the
 * locking scheme of s2io_vlan_rx_register(): all Tx fifo locks are
 * held across the update and released in reverse order.
 */
static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
{
	int i;
	struct s2io_nic *nic = dev->priv;
	unsigned long flags[MAX_TX_FIFOS];
	struct mac_info *mac_control = &nic->mac_control;
	struct config_param *config = &nic->config;

	for (i = 0; i < config->tx_fifo_num; i++)
		spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags[i]);

	/* vlgrp may be NULL if no group was ever registered. */
	if (nic->vlgrp)
		vlan_group_set_device(nic->vlgrp, vid, NULL);

	for (i = config->tx_fifo_num - 1; i >= 0; i--)
		spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock,
			flags[i]);
}
396
/*
 * Constants to be programmed into the Xena's registers, to configure
 * the XAUI.
 */

/* Terminator value for the DTX configuration sequences below. */
#define END_SIGN        0x0
/* Hercules (Xframe-II) DTX configuration sequence: pairs of
 * set-address / write-data values, terminated by END_SIGN.
 */
static const u64 herc_act_dtx_cfg[] = {
	/* Set address */
	0x8000051536750000ULL, 0x80000515367500E0ULL,
	/* Write data */
	0x8000051536750004ULL, 0x80000515367500E4ULL,
	/* Set address */
	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
	/* Write data */
	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
	/* Set address */
	0x801205150D440000ULL, 0x801205150D4400E0ULL,
	/* Write data */
	0x801205150D440004ULL, 0x801205150D4400E4ULL,
	/* Set address */
	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	/* Done */
	END_SIGN
};
423
/* Xena (Xframe-I) DTX configuration sequence: pairs of set-address /
 * write-data values, terminated by END_SIGN.
 */
static const u64 xena_dtx_cfg[] = {
	/* Set address */
	0x8000051500000000ULL, 0x80000515000000E0ULL,
	/* Write data */
	0x80000515D9350004ULL, 0x80000515D93500E4ULL,
	/* Set address */
	0x8001051500000000ULL, 0x80010515000000E0ULL,
	/* Write data */
	0x80010515001E0004ULL, 0x80010515001E00E4ULL,
	/* Set address */
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	END_SIGN
};
439
/*
 * Constants for Fixing the MacAddress problem seen mostly on
 * Alpha machines.  Written out as a sequence, terminated by END_SIGN.
 */
static const u64 fix_mac[] = {
	0x0060000000000000ULL, 0x0060600000000000ULL,
	0x0040600000000000ULL, 0x0000600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0000600000000000ULL,
	0x0040600000000000ULL, 0x0060600000000000ULL,
	END_SIGN
};
461
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


/* Module Loadable parameters. */
S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
S2IO_PARM_INT(rx_ring_num, 1);
S2IO_PARM_INT(multiq, 0);
S2IO_PARM_INT(rx_ring_mode, 1);
S2IO_PARM_INT(use_continuous_tx_intrs, 1);
S2IO_PARM_INT(rmac_pause_time, 0x100);
S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
S2IO_PARM_INT(shared_splits, 0);
S2IO_PARM_INT(tmac_util_period, 5);
S2IO_PARM_INT(rmac_util_period, 5);
S2IO_PARM_INT(l3l4hdr_size, 128);
/* 0 is no steering, 1 is Priority steering, 2 is Default steering */
S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
/* Frequency of Rx desc syncs expressed as power of 2 */
S2IO_PARM_INT(rxsync_frequency, 3);
/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
S2IO_PARM_INT(intr_type, 2);
/* Large receive offload feature */
static unsigned int lro_enable;
module_param_named(lro, lro_enable, uint, 0);

/* Max pkts to be aggregated by LRO at one time. If not specified,
 * aggregation happens until we hit max IP pkt size(64K)
 */
S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
S2IO_PARM_INT(indicate_max_pkts, 0);

S2IO_PARM_INT(napi, 1);
S2IO_PARM_INT(ufo, 0);
S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);

/* Per-fifo Tx descriptor counts; fifo 0 gets a larger default than
 * fifos 1-7 (see file header comment on tx_fifo_len).
 */
static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
    {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
/* Per-ring count of Rx blocks (see file header comment on rx_ring_sz). */
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
/* Per-ring frame-length values; presumably 0 leaves the hardware
 * default in place — verify against the code that programs them.
 */
static unsigned int rts_frm_len[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = 0 };

/* Expose the three arrays above as comma-separated module parameters. */
module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);
509
510 /*
511  * S2IO device table.
512  * This table lists all the devices that this driver supports.
513  */
514 static struct pci_device_id s2io_tbl[] __devinitdata = {
515         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
516          PCI_ANY_ID, PCI_ANY_ID},
517         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
518          PCI_ANY_ID, PCI_ANY_ID},
519         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
520          PCI_ANY_ID, PCI_ANY_ID},
521         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
522          PCI_ANY_ID, PCI_ANY_ID},
523         {0,}
524 };
525
MODULE_DEVICE_TABLE(pci, s2io_tbl);

/* PCI error-recovery callbacks (PCI-AER); wired into s2io_driver below. */
static struct pci_error_handlers s2io_err_handler = {
	.error_detected = s2io_io_error_detected,
	.slot_reset = s2io_io_slot_reset,
	.resume = s2io_io_resume,
};
533
/* PCI driver registration: probe/remove entry points and error handler. */
static struct pci_driver s2io_driver = {
      .name = "S2IO",
      .id_table = s2io_tbl,
      .probe = s2io_init_nic,
      .remove = __devexit_p(s2io_rem_nic),
      .err_handler = &s2io_err_handler,
};
541
/* A simplifier macro used both by init and free shared_mem Fns().
 * Number of pages needed to hold @len entries at @per_each entries
 * per page, i.e. ceil(len / per_each).  Arguments are fully
 * parenthesized so expression arguments (e.g. "a + b") expand
 * correctly — the original left @len and the divisor bare.
 */
#define TXD_MEM_PAGE_CNT(len, per_each) \
	(((len) + (per_each) - 1) / (per_each))
544
545 /* netqueue manipulation helper functions */
546 static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
547 {
548         int i;
549         if (sp->config.multiq) {
550                 for (i = 0; i < sp->config.tx_fifo_num; i++)
551                         netif_stop_subqueue(sp->dev, i);
552         } else {
553                 for (i = 0; i < sp->config.tx_fifo_num; i++)
554                         sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
555                 netif_stop_queue(sp->dev);
556         }
557 }
558
559 static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
560 {
561         if (sp->config.multiq)
562                 netif_stop_subqueue(sp->dev, fifo_no);
563         else {
564                 sp->mac_control.fifos[fifo_no].queue_state =
565                         FIFO_QUEUE_STOP;
566                 netif_stop_queue(sp->dev);
567         }
568 }
569
570 static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
571 {
572         int i;
573         if (sp->config.multiq) {
574                 for (i = 0; i < sp->config.tx_fifo_num; i++)
575                         netif_start_subqueue(sp->dev, i);
576         } else {
577                 for (i = 0; i < sp->config.tx_fifo_num; i++)
578                         sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
579                 netif_start_queue(sp->dev);
580         }
581 }
582
583 static inline void s2io_start_tx_queue(struct s2io_nic *sp, int fifo_no)
584 {
585         if (sp->config.multiq)
586                 netif_start_subqueue(sp->dev, fifo_no);
587         else {
588                 sp->mac_control.fifos[fifo_no].queue_state =
589                         FIFO_QUEUE_START;
590                 netif_start_queue(sp->dev);
591         }
592 }
593
594 static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
595 {
596         int i;
597         if (sp->config.multiq) {
598                 for (i = 0; i < sp->config.tx_fifo_num; i++)
599                         netif_wake_subqueue(sp->dev, i);
600         } else {
601                 for (i = 0; i < sp->config.tx_fifo_num; i++)
602                         sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
603                 netif_wake_queue(sp->dev);
604         }
605 }
606
607 static inline void s2io_wake_tx_queue(
608         struct fifo_info *fifo, int cnt, u8 multiq)
609 {
610
611         if (multiq) {
612                 if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
613                         netif_wake_subqueue(fifo->dev, fifo->fifo_no);
614         } else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
615                 if (netif_queue_stopped(fifo->dev)) {
616                         fifo->queue_state = FIFO_QUEUE_START;
617                         netif_wake_queue(fifo->dev);
618                 }
619         }
620 }
621
622 /**
623  * init_shared_mem - Allocation and Initialization of Memory
624  * @nic: Device private variable.
625  * Description: The function allocates all the memory areas shared
626  * between the NIC and the driver. This includes Tx descriptors,
627  * Rx descriptors and the statistics block.
628  */
629
630 static int init_shared_mem(struct s2io_nic *nic)
631 {
632         u32 size;
633         void *tmp_v_addr, *tmp_v_addr_next;
634         dma_addr_t tmp_p_addr, tmp_p_addr_next;
635         struct RxD_block *pre_rxd_blk = NULL;
636         int i, j, blk_cnt;
637         int lst_size, lst_per_page;
638         struct net_device *dev = nic->dev;
639         unsigned long tmp;
640         struct buffAdd *ba;
641
642         struct mac_info *mac_control;
643         struct config_param *config;
644         unsigned long long mem_allocated = 0;
645
646         mac_control = &nic->mac_control;
647         config = &nic->config;
648
649
 650         /* Allocation and initialization of TXDLs in FIFOs */
651         size = 0;
652         for (i = 0; i < config->tx_fifo_num; i++) {
653                 size += config->tx_cfg[i].fifo_len;
654         }
655         if (size > MAX_AVAILABLE_TXDS) {
656                 DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
657                 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
658                 return -EINVAL;
659         }
660
661         size = 0;
662         for (i = 0; i < config->tx_fifo_num; i++) {
663                 size = config->tx_cfg[i].fifo_len;
664                 /*
665                  * Legal values are from 2 to 8192
666                  */
667                 if (size < 2) {
668                         DBG_PRINT(ERR_DBG, "s2io: Invalid fifo len (%d)", size);
669                         DBG_PRINT(ERR_DBG, "for fifo %d\n", i);
670                         DBG_PRINT(ERR_DBG, "s2io: Legal values for fifo len"
671                                 "are 2 to 8192\n");
672                         return -EINVAL;
673                 }
674         }
675
676         lst_size = (sizeof(struct TxD) * config->max_txds);
677         lst_per_page = PAGE_SIZE / lst_size;
678
679         for (i = 0; i < config->tx_fifo_num; i++) {
680                 int fifo_len = config->tx_cfg[i].fifo_len;
681                 int list_holder_size = fifo_len * sizeof(struct list_info_hold);
682                 mac_control->fifos[i].list_info = kzalloc(list_holder_size,
683                                                           GFP_KERNEL);
684                 if (!mac_control->fifos[i].list_info) {
685                         DBG_PRINT(INFO_DBG,
686                                   "Malloc failed for list_info\n");
687                         return -ENOMEM;
688                 }
689                 mem_allocated += list_holder_size;
690         }
691         for (i = 0; i < config->tx_fifo_num; i++) {
692                 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
693                                                 lst_per_page);
694                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
695                 mac_control->fifos[i].tx_curr_put_info.fifo_len =
696                     config->tx_cfg[i].fifo_len - 1;
697                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
698                 mac_control->fifos[i].tx_curr_get_info.fifo_len =
699                     config->tx_cfg[i].fifo_len - 1;
700                 mac_control->fifos[i].fifo_no = i;
701                 mac_control->fifos[i].nic = nic;
702                 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
703                 mac_control->fifos[i].dev = dev;
704
705                 for (j = 0; j < page_num; j++) {
706                         int k = 0;
707                         dma_addr_t tmp_p;
708                         void *tmp_v;
709                         tmp_v = pci_alloc_consistent(nic->pdev,
710                                                      PAGE_SIZE, &tmp_p);
711                         if (!tmp_v) {
712                                 DBG_PRINT(INFO_DBG,
713                                           "pci_alloc_consistent ");
714                                 DBG_PRINT(INFO_DBG, "failed for TxDL\n");
715                                 return -ENOMEM;
716                         }
717                         /* If we got a zero DMA address(can happen on
718                          * certain platforms like PPC), reallocate.
719                          * Store virtual address of page we don't want,
720                          * to be freed later.
721                          */
722                         if (!tmp_p) {
723                                 mac_control->zerodma_virt_addr = tmp_v;
724                                 DBG_PRINT(INIT_DBG,
725                                 "%s: Zero DMA address for TxDL. ", dev->name);
726                                 DBG_PRINT(INIT_DBG,
727                                 "Virtual address %p\n", tmp_v);
728                                 tmp_v = pci_alloc_consistent(nic->pdev,
729                                                      PAGE_SIZE, &tmp_p);
730                                 if (!tmp_v) {
731                                         DBG_PRINT(INFO_DBG,
732                                           "pci_alloc_consistent ");
733                                         DBG_PRINT(INFO_DBG, "failed for TxDL\n");
734                                         return -ENOMEM;
735                                 }
736                                 mem_allocated += PAGE_SIZE;
737                         }
738                         while (k < lst_per_page) {
739                                 int l = (j * lst_per_page) + k;
740                                 if (l == config->tx_cfg[i].fifo_len)
741                                         break;
742                                 mac_control->fifos[i].list_info[l].list_virt_addr =
743                                     tmp_v + (k * lst_size);
744                                 mac_control->fifos[i].list_info[l].list_phy_addr =
745                                     tmp_p + (k * lst_size);
746                                 k++;
747                         }
748                 }
749         }
750
751         for (i = 0; i < config->tx_fifo_num; i++) {
752                 size = config->tx_cfg[i].fifo_len;
753                 mac_control->fifos[i].ufo_in_band_v
754                         = kcalloc(size, sizeof(u64), GFP_KERNEL);
755                 if (!mac_control->fifos[i].ufo_in_band_v)
756                         return -ENOMEM;
757                 mem_allocated += (size * sizeof(u64));
758         }
759
760         /* Allocation and initialization of RXDs in Rings */
761         size = 0;
762         for (i = 0; i < config->rx_ring_num; i++) {
763                 if (config->rx_cfg[i].num_rxd %
764                     (rxd_count[nic->rxd_mode] + 1)) {
765                         DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
766                         DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
767                                   i);
768                         DBG_PRINT(ERR_DBG, "RxDs per Block");
769                         return FAILURE;
770                 }
771                 size += config->rx_cfg[i].num_rxd;
772                 mac_control->rings[i].block_count =
773                         config->rx_cfg[i].num_rxd /
774                         (rxd_count[nic->rxd_mode] + 1 );
775                 mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
776                         mac_control->rings[i].block_count;
777         }
778         if (nic->rxd_mode == RXD_MODE_1)
779                 size = (size * (sizeof(struct RxD1)));
780         else
781                 size = (size * (sizeof(struct RxD3)));
782
783         for (i = 0; i < config->rx_ring_num; i++) {
784                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
785                 mac_control->rings[i].rx_curr_get_info.offset = 0;
786                 mac_control->rings[i].rx_curr_get_info.ring_len =
787                     config->rx_cfg[i].num_rxd - 1;
788                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
789                 mac_control->rings[i].rx_curr_put_info.offset = 0;
790                 mac_control->rings[i].rx_curr_put_info.ring_len =
791                     config->rx_cfg[i].num_rxd - 1;
792                 mac_control->rings[i].nic = nic;
793                 mac_control->rings[i].ring_no = i;
794                 mac_control->rings[i].lro = lro_enable;
795
796                 blk_cnt = config->rx_cfg[i].num_rxd /
797                                 (rxd_count[nic->rxd_mode] + 1);
798                 /*  Allocating all the Rx blocks */
799                 for (j = 0; j < blk_cnt; j++) {
800                         struct rx_block_info *rx_blocks;
801                         int l;
802
803                         rx_blocks = &mac_control->rings[i].rx_blocks[j];
804                         size = SIZE_OF_BLOCK; //size is always page size
805                         tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
806                                                           &tmp_p_addr);
807                         if (tmp_v_addr == NULL) {
808                                 /*
809                                  * In case of failure, free_shared_mem()
810                                  * is called, which should free any
811                                  * memory that was alloced till the
812                                  * failure happened.
813                                  */
814                                 rx_blocks->block_virt_addr = tmp_v_addr;
815                                 return -ENOMEM;
816                         }
817                         mem_allocated += size;
818                         memset(tmp_v_addr, 0, size);
819                         rx_blocks->block_virt_addr = tmp_v_addr;
820                         rx_blocks->block_dma_addr = tmp_p_addr;
821                         rx_blocks->rxds = kmalloc(sizeof(struct rxd_info)*
822                                                   rxd_count[nic->rxd_mode],
823                                                   GFP_KERNEL);
824                         if (!rx_blocks->rxds)
825                                 return -ENOMEM;
826                         mem_allocated +=
827                         (sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
828                         for (l=0; l<rxd_count[nic->rxd_mode];l++) {
829                                 rx_blocks->rxds[l].virt_addr =
830                                         rx_blocks->block_virt_addr +
831                                         (rxd_size[nic->rxd_mode] * l);
832                                 rx_blocks->rxds[l].dma_addr =
833                                         rx_blocks->block_dma_addr +
834                                         (rxd_size[nic->rxd_mode] * l);
835                         }
836                 }
837                 /* Interlinking all Rx Blocks */
838                 for (j = 0; j < blk_cnt; j++) {
839                         tmp_v_addr =
840                                 mac_control->rings[i].rx_blocks[j].block_virt_addr;
841                         tmp_v_addr_next =
842                                 mac_control->rings[i].rx_blocks[(j + 1) %
843                                               blk_cnt].block_virt_addr;
844                         tmp_p_addr =
845                                 mac_control->rings[i].rx_blocks[j].block_dma_addr;
846                         tmp_p_addr_next =
847                                 mac_control->rings[i].rx_blocks[(j + 1) %
848                                               blk_cnt].block_dma_addr;
849
850                         pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
851                         pre_rxd_blk->reserved_2_pNext_RxD_block =
852                             (unsigned long) tmp_v_addr_next;
853                         pre_rxd_blk->pNext_RxD_Blk_physical =
854                             (u64) tmp_p_addr_next;
855                 }
856         }
857         if (nic->rxd_mode == RXD_MODE_3B) {
858                 /*
859                  * Allocation of Storages for buffer addresses in 2BUFF mode
860                  * and the buffers as well.
861                  */
862                 for (i = 0; i < config->rx_ring_num; i++) {
863                         blk_cnt = config->rx_cfg[i].num_rxd /
864                            (rxd_count[nic->rxd_mode]+ 1);
865                         mac_control->rings[i].ba =
866                                 kmalloc((sizeof(struct buffAdd *) * blk_cnt),
867                                      GFP_KERNEL);
868                         if (!mac_control->rings[i].ba)
869                                 return -ENOMEM;
870                         mem_allocated +=(sizeof(struct buffAdd *) * blk_cnt);
871                         for (j = 0; j < blk_cnt; j++) {
872                                 int k = 0;
873                                 mac_control->rings[i].ba[j] =
874                                         kmalloc((sizeof(struct buffAdd) *
875                                                 (rxd_count[nic->rxd_mode] + 1)),
876                                                 GFP_KERNEL);
877                                 if (!mac_control->rings[i].ba[j])
878                                         return -ENOMEM;
879                                 mem_allocated += (sizeof(struct buffAdd) *  \
880                                         (rxd_count[nic->rxd_mode] + 1));
881                                 while (k != rxd_count[nic->rxd_mode]) {
882                                         ba = &mac_control->rings[i].ba[j][k];
883
884                                         ba->ba_0_org = (void *) kmalloc
885                                             (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
886                                         if (!ba->ba_0_org)
887                                                 return -ENOMEM;
888                                         mem_allocated +=
889                                                 (BUF0_LEN + ALIGN_SIZE);
890                                         tmp = (unsigned long)ba->ba_0_org;
891                                         tmp += ALIGN_SIZE;
892                                         tmp &= ~((unsigned long) ALIGN_SIZE);
893                                         ba->ba_0 = (void *) tmp;
894
895                                         ba->ba_1_org = (void *) kmalloc
896                                             (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
897                                         if (!ba->ba_1_org)
898                                                 return -ENOMEM;
899                                         mem_allocated
900                                                 += (BUF1_LEN + ALIGN_SIZE);
901                                         tmp = (unsigned long) ba->ba_1_org;
902                                         tmp += ALIGN_SIZE;
903                                         tmp &= ~((unsigned long) ALIGN_SIZE);
904                                         ba->ba_1 = (void *) tmp;
905                                         k++;
906                                 }
907                         }
908                 }
909         }
910
911         /* Allocation and initialization of Statistics block */
912         size = sizeof(struct stat_block);
913         mac_control->stats_mem = pci_alloc_consistent
914             (nic->pdev, size, &mac_control->stats_mem_phy);
915
916         if (!mac_control->stats_mem) {
917                 /*
918                  * In case of failure, free_shared_mem() is called, which
919                  * should free any memory that was alloced till the
920                  * failure happened.
921                  */
922                 return -ENOMEM;
923         }
924         mem_allocated += size;
925         mac_control->stats_mem_sz = size;
926
927         tmp_v_addr = mac_control->stats_mem;
928         mac_control->stats_info = (struct stat_block *) tmp_v_addr;
929         memset(tmp_v_addr, 0, size);
930         DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
931                   (unsigned long long) tmp_p_addr);
932         mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
933         return SUCCESS;
934 }
935
936 /**
937  * free_shared_mem - Free the allocated Memory
938  * @nic:  Device private variable.
939  * Description: This function is to free all memory locations allocated by
940  * the init_shared_mem() function and return it to the kernel.
941  */
942
static void free_shared_mem(struct s2io_nic *nic)
{
	int i, j, blk_cnt, size;
	void *tmp_v_addr;
	dma_addr_t tmp_p_addr;
	struct mac_info *mac_control;
	struct config_param *config;
	int lst_size, lst_per_page;
	struct net_device *dev;
	int page_num = 0;

	if (!nic)
		return;

	dev = nic->dev;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* One TxDL (descriptor list) is max_txds TxDs; lst_per_page is how
	 * many whole TxDLs fit in one DMA-coherent page, mirroring the
	 * layout used at allocation time in init_shared_mem().
	 */
	lst_size = (sizeof(struct TxD) * config->max_txds);
	lst_per_page = PAGE_SIZE / lst_size;

	/* Free the per-fifo TxDL pages.  list_info holds one entry per TxDL,
	 * but only every lst_per_page-th entry owns a page allocation, so we
	 * step through in page-sized strides (mem_blks).
	 */
	for (i = 0; i < config->tx_fifo_num; i++) {
		page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
							lst_per_page);
		for (j = 0; j < page_num; j++) {
			int mem_blks = (j * lst_per_page);
			/* NOTE(review): returning here skips freeing the
			 * remaining fifos, the Rx blocks and the stats block
			 * below — verify this cannot leak when allocation
			 * failed part-way through init_shared_mem().
			 */
			if (!mac_control->fifos[i].list_info)
				return;
			/* A NULL virt addr marks the first never-allocated
			 * page; everything past it is unallocated too.
			 */
			if (!mac_control->fifos[i].list_info[mem_blks].
				 list_virt_addr)
				break;
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    mac_control->fifos[i].
					    list_info[mem_blks].
					    list_virt_addr,
					    mac_control->fifos[i].
					    list_info[mem_blks].
					    list_phy_addr);
			nic->mac_control.stats_info->sw_stat.mem_freed
						+= PAGE_SIZE;
		}
		/* If we got a zero DMA address during allocation,
		 * free the page now
		 */
		if (mac_control->zerodma_virt_addr) {
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    mac_control->zerodma_virt_addr,
					    (dma_addr_t)0);
			DBG_PRINT(INIT_DBG,
				"%s: Freeing TxDL with zero DMA addr. ",
				dev->name);
			DBG_PRINT(INIT_DBG, "Virtual address %p\n",
				mac_control->zerodma_virt_addr);
			nic->mac_control.stats_info->sw_stat.mem_freed
						+= PAGE_SIZE;
		}
		kfree(mac_control->fifos[i].list_info);
		nic->mac_control.stats_info->sw_stat.mem_freed +=
		(nic->config.tx_cfg[i].fifo_len *sizeof(struct list_info_hold));
	}

	/* Free the Rx descriptor blocks and the per-block rxds index array.
	 * A NULL block_virt_addr marks the first unallocated block of a ring.
	 */
	size = SIZE_OF_BLOCK;
	for (i = 0; i < config->rx_ring_num; i++) {
		blk_cnt = mac_control->rings[i].block_count;
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr = mac_control->rings[i].rx_blocks[j].
				block_virt_addr;
			tmp_p_addr = mac_control->rings[i].rx_blocks[j].
				block_dma_addr;
			if (tmp_v_addr == NULL)
				break;
			pci_free_consistent(nic->pdev, size,
					    tmp_v_addr, tmp_p_addr);
			nic->mac_control.stats_info->sw_stat.mem_freed += size;
			kfree(mac_control->rings[i].rx_blocks[j].rxds);
			nic->mac_control.stats_info->sw_stat.mem_freed +=
			( sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
		}
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Freeing buffer storage addresses in 2BUFF mode. */
		for (i = 0; i < config->rx_ring_num; i++) {
			blk_cnt = config->rx_cfg[i].num_rxd /
			    (rxd_count[nic->rxd_mode] + 1);
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;
				if (!mac_control->rings[i].ba[j])
					continue;
				while (k != rxd_count[nic->rxd_mode]) {
					struct buffAdd *ba =
						&mac_control->rings[i].ba[j][k];
					/* Free the unaligned originals; ba_0/
					 * ba_1 are aligned views into these.
					 */
					kfree(ba->ba_0_org);
					nic->mac_control.stats_info->sw_stat.\
					mem_freed += (BUF0_LEN + ALIGN_SIZE);
					kfree(ba->ba_1_org);
					nic->mac_control.stats_info->sw_stat.\
					mem_freed += (BUF1_LEN + ALIGN_SIZE);
					k++;
				}
				kfree(mac_control->rings[i].ba[j]);
				nic->mac_control.stats_info->sw_stat.mem_freed +=
					(sizeof(struct buffAdd) *
					(rxd_count[nic->rxd_mode] + 1));
			}
			kfree(mac_control->rings[i].ba);
			nic->mac_control.stats_info->sw_stat.mem_freed +=
			(sizeof(struct buffAdd *) * blk_cnt);
		}
	}

	/* Per-fifo UFO in-band scratch arrays allocated in init_shared_mem() */
	for (i = 0; i < nic->config.tx_fifo_num; i++) {
		if (mac_control->fifos[i].ufo_in_band_v) {
			nic->mac_control.stats_info->sw_stat.mem_freed
				+= (config->tx_cfg[i].fifo_len * sizeof(u64));
			kfree(mac_control->fifos[i].ufo_in_band_v);
		}
	}

	/* Statistics block last: sw_stat lives inside it, so the mem_freed
	 * update must happen before the block itself is released.
	 */
	if (mac_control->stats_mem) {
		nic->mac_control.stats_info->sw_stat.mem_freed +=
			mac_control->stats_mem_sz;
		pci_free_consistent(nic->pdev,
				    mac_control->stats_mem_sz,
				    mac_control->stats_mem,
				    mac_control->stats_mem_phy);
	}
}
1072
/**
 * s2io_verify_pci_mode - Read the adapter's PCI/PCI-X operating mode
 * @nic: device private variable
 * Return: the decoded mode on success, -1 if the card reports an
 * unknown PCI mode.
 */
1076
1077 static int s2io_verify_pci_mode(struct s2io_nic *nic)
1078 {
1079         struct XENA_dev_config __iomem *bar0 = nic->bar0;
1080         register u64 val64 = 0;
1081         int     mode;
1082
1083         val64 = readq(&bar0->pci_mode);
1084         mode = (u8)GET_PCI_MODE(val64);
1085
1086         if ( val64 & PCI_MODE_UNKNOWN_MODE)
1087                 return -1;      /* Unknown PCI mode */
1088         return mode;
1089 }
1090
1091 #define NEC_VENID   0x1033
1092 #define NEC_DEVID   0x0125
1093 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
1094 {
1095         struct pci_dev *tdev = NULL;
1096         while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
1097                 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
1098                         if (tdev->bus == s2io_pdev->bus->parent) {
1099                                 pci_dev_put(tdev);
1100                                 return 1;
1101                         }
1102                 }
1103         }
1104         return 0;
1105 }
1106
1107 static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
/**
 * s2io_print_pci_mode - Report the bus type/speed the adapter negotiated
 * @nic: device private variable
 * Description: Records the negotiated bus speed in the device config and
 * logs a human-readable description of the bus.
 * Return: the decoded mode on success, -1 on an unknown or unsupported mode.
 */
static int s2io_print_pci_mode(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int	mode;
	struct config_param *config = &nic->config;

	val64 = readq(&bar0->pci_mode);
	mode = (u8)GET_PCI_MODE(val64);

	if ( val64 & PCI_MODE_UNKNOWN_MODE)
		return -1;	/* Unknown PCI mode */

	/* Cache the bus clock; init_tti() uses it to scale timer values. */
	config->bus_speed = bus_speed[mode];

	/* Behind a NEC bridge the card is effectively on PCI-E; the
	 * width/speed breakdown below would be misleading, so stop here.
	 */
	if (s2io_on_nec_bridge(nic->pdev)) {
		DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
							nic->dev->name);
		return mode;
	}

	if (val64 & PCI_MODE_32_BITS) {
		DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
	} else {
		DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
	}

	/* PCI-X mode-2 cases print the doubled effective clock rate
	 * (e.g. M2_66 runs at an effective 133MHz) — presumably matching
	 * the bus_speed[] table above; confirm against the hardware docs.
	 */
	switch(mode) {
		case PCI_MODE_PCI_33:
			DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
			break;
		case PCI_MODE_PCI_66:
			DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
			break;
		case PCI_MODE_PCIX_M1_66:
			DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
			break;
		case PCI_MODE_PCIX_M1_100:
			DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
			break;
		case PCI_MODE_PCIX_M1_133:
			DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
			break;
		case PCI_MODE_PCIX_M2_66:
			DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
			break;
		case PCI_MODE_PCIX_M2_100:
			DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
			break;
		case PCI_MODE_PCIX_M2_133:
			DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
			break;
		default:
			return -1;	/* Unsupported bus speed */
	}

	return mode;
}
1169
1170 /**
1171  *  init_tti - Initialization transmit traffic interrupt scheme
1172  *  @nic: device private variable
1173  *  @link: link status (UP/DOWN) used to enable/disable continuous
1174  *  transmit interrupts
1175  *  Description: The function configures transmit traffic interrupts
1176  *  Return Value:  SUCCESS on success and
1177  *  '-1' on failure
1178  */
1179
static int init_tti(struct s2io_nic *nic, int link)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int i;
	struct config_param *config;

	config = &nic->config;

	/* Program one TTI (transmit traffic interrupt) entry per Tx fifo.
	 * Each entry is written by staging data1/data2 and then issuing a
	 * write-enable strobe at the fifo's offset in tti_command_mem.
	 */
	for (i = 0; i < config->tx_fifo_num; i++) {
		/*
		 * TTI Initialization. Default Tx timer gets us about
		 * 250 interrupts per sec. Continuous interrupts are enabled
		 * by default.
		 */
		if (nic->device_type == XFRAME_II_DEVICE) {
			/* Scale the timer by bus speed so the interrupt
			 * rate stays roughly constant across bus modes.
			 */
			int count = (nic->config.bus_speed * 125)/2;
			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
		} else
			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);

		/* Utilization range thresholds A/B/C plus automatic timer
		 * restart (AC_EN).
		 */
		val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
				TTI_DATA1_MEM_TX_URNG_B(0x10) |
				TTI_DATA1_MEM_TX_URNG_C(0x30) |
				TTI_DATA1_MEM_TX_TIMER_AC_EN;
		/* Continuous interrupts only on fifo 0, only while link is
		 * up, and only if the module parameter asks for them.
		 */
		if (i == 0)
			if (use_continuous_tx_intrs && (link == LINK_UP))
				val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
		writeq(val64, &bar0->tti_data1_mem);

		/* UFC A-D: per-range interrupt coalescing frame counts.
		 * MSI-X gets larger counts (fewer interrupts); under INTA,
		 * the UDP-steered fifos get their own intermediate tuning.
		 */
		if (nic->config.intr_type == MSI_X) {
			val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
				TTI_DATA2_MEM_TX_UFC_B(0x100) |
				TTI_DATA2_MEM_TX_UFC_C(0x200) |
				TTI_DATA2_MEM_TX_UFC_D(0x300);
		} else {
			if ((nic->config.tx_steering_type ==
				TX_DEFAULT_STEERING) &&
				(config->tx_fifo_num > 1) &&
				(i >= nic->udp_fifo_idx) &&
				(i < (nic->udp_fifo_idx +
				nic->total_udp_fifos)))
				val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
					TTI_DATA2_MEM_TX_UFC_B(0x80) |
					TTI_DATA2_MEM_TX_UFC_C(0x100) |
					TTI_DATA2_MEM_TX_UFC_D(0x120);
			else
				val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
					TTI_DATA2_MEM_TX_UFC_B(0x20) |
					TTI_DATA2_MEM_TX_UFC_C(0x40) |
					TTI_DATA2_MEM_TX_UFC_D(0x80);
		}

		writeq(val64, &bar0->tti_data2_mem);

		/* Commit the staged entry for fifo i and wait for the
		 * strobe bit to self-clear.
		 */
		val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD |
				TTI_CMD_MEM_OFFSET(i);
		writeq(val64, &bar0->tti_command_mem);

		if (wait_for_cmd_complete(&bar0->tti_command_mem,
			TTI_CMD_MEM_STROBE_NEW_CMD, S2IO_BIT_RESET) != SUCCESS)
			return FAILURE;
	}

	return SUCCESS;
}
1246
1247 /**
1248  *  init_nic - Initialization of hardware
1249  *  @nic: device private variable
1250  *  Description: The function sequentially configures every block
1251  *  of the H/W from their reset values.
1252  *  Return Value:  SUCCESS on success and
1253  *  '-1' on failure (endian settings incorrect).
1254  */
1255
1256 static int init_nic(struct s2io_nic *nic)
1257 {
1258         struct XENA_dev_config __iomem *bar0 = nic->bar0;
1259         struct net_device *dev = nic->dev;
1260         register u64 val64 = 0;
1261         void __iomem *add;
1262         u32 time;
1263         int i, j;
1264         struct mac_info *mac_control;
1265         struct config_param *config;
1266         int dtx_cnt = 0;
1267         unsigned long long mem_share;
1268         int mem_size;
1269
1270         mac_control = &nic->mac_control;
1271         config = &nic->config;
1272
1273         /* to set the swapper controle on the card */
1274         if(s2io_set_swapper(nic)) {
1275                 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
1276                 return -EIO;
1277         }
1278
1279         /*
1280          * Herc requires EOI to be removed from reset before XGXS, so..
1281          */
1282         if (nic->device_type & XFRAME_II_DEVICE) {
1283                 val64 = 0xA500000000ULL;
1284                 writeq(val64, &bar0->sw_reset);
1285                 msleep(500);
1286                 val64 = readq(&bar0->sw_reset);
1287         }
1288
1289         /* Remove XGXS from reset state */
1290         val64 = 0;
1291         writeq(val64, &bar0->sw_reset);
1292         msleep(500);
1293         val64 = readq(&bar0->sw_reset);
1294
1295         /* Ensure that it's safe to access registers by checking
1296          * RIC_RUNNING bit is reset. Check is valid only for XframeII.
1297          */
1298         if (nic->device_type == XFRAME_II_DEVICE) {
1299                 for (i = 0; i < 50; i++) {
1300                         val64 = readq(&bar0->adapter_status);
1301                         if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
1302                                 break;
1303                         msleep(10);
1304                 }
1305                 if (i == 50)
1306                         return -ENODEV;
1307         }
1308
1309         /*  Enable Receiving broadcasts */
1310         add = &bar0->mac_cfg;
1311         val64 = readq(&bar0->mac_cfg);
1312         val64 |= MAC_RMAC_BCAST_ENABLE;
1313         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1314         writel((u32) val64, add);
1315         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1316         writel((u32) (val64 >> 32), (add + 4));
1317
1318         /* Read registers in all blocks */
1319         val64 = readq(&bar0->mac_int_mask);
1320         val64 = readq(&bar0->mc_int_mask);
1321         val64 = readq(&bar0->xgxs_int_mask);
1322
1323         /*  Set MTU */
1324         val64 = dev->mtu;
1325         writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1326
1327         if (nic->device_type & XFRAME_II_DEVICE) {
1328                 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1329                         SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1330                                           &bar0->dtx_control, UF);
1331                         if (dtx_cnt & 0x1)
1332                                 msleep(1); /* Necessary!! */
1333                         dtx_cnt++;
1334                 }
1335         } else {
1336                 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1337                         SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1338                                           &bar0->dtx_control, UF);
1339                         val64 = readq(&bar0->dtx_control);
1340                         dtx_cnt++;
1341                 }
1342         }
1343
1344         /*  Tx DMA Initialization */
1345         val64 = 0;
1346         writeq(val64, &bar0->tx_fifo_partition_0);
1347         writeq(val64, &bar0->tx_fifo_partition_1);
1348         writeq(val64, &bar0->tx_fifo_partition_2);
1349         writeq(val64, &bar0->tx_fifo_partition_3);
1350
1351
1352         for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1353                 val64 |=
1354                     vBIT(config->tx_cfg[i].fifo_len - 1, ((j * 32) + 19),
1355                          13) | vBIT(config->tx_cfg[i].fifo_priority,
1356                                     ((j * 32) + 5), 3);
1357
1358                 if (i == (config->tx_fifo_num - 1)) {
1359                         if (i % 2 == 0)
1360                                 i++;
1361                 }
1362
1363                 switch (i) {
1364                 case 1:
1365                         writeq(val64, &bar0->tx_fifo_partition_0);
1366                         val64 = 0;
1367                         j = 0;
1368                         break;
1369                 case 3:
1370                         writeq(val64, &bar0->tx_fifo_partition_1);
1371                         val64 = 0;
1372                         j = 0;
1373                         break;
1374                 case 5:
1375                         writeq(val64, &bar0->tx_fifo_partition_2);
1376                         val64 = 0;
1377                         j = 0;
1378                         break;
1379                 case 7:
1380                         writeq(val64, &bar0->tx_fifo_partition_3);
1381                         val64 = 0;
1382                         j = 0;
1383                         break;
1384                 default:
1385                         j++;
1386                         break;
1387                 }
1388         }
1389
1390         /*
1391          * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1392          * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1393          */
1394         if ((nic->device_type == XFRAME_I_DEVICE) &&
1395                 (nic->pdev->revision < 4))
1396                 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1397
1398         val64 = readq(&bar0->tx_fifo_partition_0);
1399         DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1400                   &bar0->tx_fifo_partition_0, (unsigned long long) val64);
1401
1402         /*
1403          * Initialization of Tx_PA_CONFIG register to ignore packet
1404          * integrity checking.
1405          */
1406         val64 = readq(&bar0->tx_pa_cfg);
1407         val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
1408             TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
1409         writeq(val64, &bar0->tx_pa_cfg);
1410
1411         /* Rx DMA intialization. */
1412         val64 = 0;
1413         for (i = 0; i < config->rx_ring_num; i++) {
1414                 val64 |=
1415                     vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
1416                          3);
1417         }
1418         writeq(val64, &bar0->rx_queue_priority);
1419
1420         /*
1421          * Allocating equal share of memory to all the
1422          * configured Rings.
1423          */
1424         val64 = 0;
1425         if (nic->device_type & XFRAME_II_DEVICE)
1426                 mem_size = 32;
1427         else
1428                 mem_size = 64;
1429
1430         for (i = 0; i < config->rx_ring_num; i++) {
1431                 switch (i) {
1432                 case 0:
1433                         mem_share = (mem_size / config->rx_ring_num +
1434                                      mem_size % config->rx_ring_num);
1435                         val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1436                         continue;
1437                 case 1:
1438                         mem_share = (mem_size / config->rx_ring_num);
1439                         val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1440                         continue;
1441                 case 2:
1442                         mem_share = (mem_size / config->rx_ring_num);
1443                         val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1444                         continue;
1445                 case 3:
1446                         mem_share = (mem_size / config->rx_ring_num);
1447                         val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1448                         continue;
1449                 case 4:
1450                         mem_share = (mem_size / config->rx_ring_num);
1451                         val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1452                         continue;
1453                 case 5:
1454                         mem_share = (mem_size / config->rx_ring_num);
1455                         val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1456                         continue;
1457                 case 6:
1458                         mem_share = (mem_size / config->rx_ring_num);
1459                         val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1460                         continue;
1461                 case 7:
1462                         mem_share = (mem_size / config->rx_ring_num);
1463                         val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1464                         continue;
1465                 }
1466         }
1467         writeq(val64, &bar0->rx_queue_cfg);
1468
1469         /*
1470          * Filling Tx round robin registers
1471          * as per the number of FIFOs for equal scheduling priority
1472          */
1473         switch (config->tx_fifo_num) {
1474         case 1:
1475                 val64 = 0x0;
1476                 writeq(val64, &bar0->tx_w_round_robin_0);
1477                 writeq(val64, &bar0->tx_w_round_robin_1);
1478                 writeq(val64, &bar0->tx_w_round_robin_2);
1479                 writeq(val64, &bar0->tx_w_round_robin_3);
1480                 writeq(val64, &bar0->tx_w_round_robin_4);
1481                 break;
1482         case 2:
1483                 val64 = 0x0001000100010001ULL;
1484                 writeq(val64, &bar0->tx_w_round_robin_0);
1485                 writeq(val64, &bar0->tx_w_round_robin_1);
1486                 writeq(val64, &bar0->tx_w_round_robin_2);
1487                 writeq(val64, &bar0->tx_w_round_robin_3);
1488                 val64 = 0x0001000100000000ULL;
1489                 writeq(val64, &bar0->tx_w_round_robin_4);
1490                 break;
1491         case 3:
1492                 val64 = 0x0001020001020001ULL;
1493                 writeq(val64, &bar0->tx_w_round_robin_0);
1494                 val64 = 0x0200010200010200ULL;
1495                 writeq(val64, &bar0->tx_w_round_robin_1);
1496                 val64 = 0x0102000102000102ULL;
1497                 writeq(val64, &bar0->tx_w_round_robin_2);
1498                 val64 = 0x0001020001020001ULL;
1499                 writeq(val64, &bar0->tx_w_round_robin_3);
1500                 val64 = 0x0200010200000000ULL;
1501                 writeq(val64, &bar0->tx_w_round_robin_4);
1502                 break;
1503         case 4:
1504                 val64 = 0x0001020300010203ULL;
1505                 writeq(val64, &bar0->tx_w_round_robin_0);
1506                 writeq(val64, &bar0->tx_w_round_robin_1);
1507                 writeq(val64, &bar0->tx_w_round_robin_2);
1508                 writeq(val64, &bar0->tx_w_round_robin_3);
1509                 val64 = 0x0001020300000000ULL;
1510                 writeq(val64, &bar0->tx_w_round_robin_4);
1511                 break;
1512         case 5:
1513                 val64 = 0x0001020304000102ULL;
1514                 writeq(val64, &bar0->tx_w_round_robin_0);
1515                 val64 = 0x0304000102030400ULL;
1516                 writeq(val64, &bar0->tx_w_round_robin_1);
1517                 val64 = 0x0102030400010203ULL;
1518                 writeq(val64, &bar0->tx_w_round_robin_2);
1519                 val64 = 0x0400010203040001ULL;
1520                 writeq(val64, &bar0->tx_w_round_robin_3);
1521                 val64 = 0x0203040000000000ULL;
1522                 writeq(val64, &bar0->tx_w_round_robin_4);
1523                 break;
1524         case 6:
1525                 val64 = 0x0001020304050001ULL;
1526                 writeq(val64, &bar0->tx_w_round_robin_0);
1527                 val64 = 0x0203040500010203ULL;
1528                 writeq(val64, &bar0->tx_w_round_robin_1);
1529                 val64 = 0x0405000102030405ULL;
1530                 writeq(val64, &bar0->tx_w_round_robin_2);
1531                 val64 = 0x0001020304050001ULL;
1532                 writeq(val64, &bar0->tx_w_round_robin_3);
1533                 val64 = 0x0203040500000000ULL;
1534                 writeq(val64, &bar0->tx_w_round_robin_4);
1535                 break;
1536         case 7:
1537                 val64 = 0x0001020304050600ULL;
1538                 writeq(val64, &bar0->tx_w_round_robin_0);
1539                 val64 = 0x0102030405060001ULL;
1540                 writeq(val64, &bar0->tx_w_round_robin_1);
1541                 val64 = 0x0203040506000102ULL;
1542                 writeq(val64, &bar0->tx_w_round_robin_2);
1543                 val64 = 0x0304050600010203ULL;
1544                 writeq(val64, &bar0->tx_w_round_robin_3);
1545                 val64 = 0x0405060000000000ULL;
1546                 writeq(val64, &bar0->tx_w_round_robin_4);
1547                 break;
1548         case 8:
1549                 val64 = 0x0001020304050607ULL;
1550                 writeq(val64, &bar0->tx_w_round_robin_0);
1551                 writeq(val64, &bar0->tx_w_round_robin_1);
1552                 writeq(val64, &bar0->tx_w_round_robin_2);
1553                 writeq(val64, &bar0->tx_w_round_robin_3);
1554                 val64 = 0x0001020300000000ULL;
1555                 writeq(val64, &bar0->tx_w_round_robin_4);
1556                 break;
1557         }
1558
1559         /* Enable all configured Tx FIFO partitions */
1560         val64 = readq(&bar0->tx_fifo_partition_0);
1561         val64 |= (TX_FIFO_PARTITION_EN);
1562         writeq(val64, &bar0->tx_fifo_partition_0);
1563
1564         /* Filling the Rx round robin registers as per the
1565          * number of Rings and steering based on QoS with
1566          * equal priority.
1567          */
1568         switch (config->rx_ring_num) {
1569         case 1:
1570                 val64 = 0x0;
1571                 writeq(val64, &bar0->rx_w_round_robin_0);
1572                 writeq(val64, &bar0->rx_w_round_robin_1);
1573                 writeq(val64, &bar0->rx_w_round_robin_2);
1574                 writeq(val64, &bar0->rx_w_round_robin_3);
1575                 writeq(val64, &bar0->rx_w_round_robin_4);
1576
1577                 val64 = 0x8080808080808080ULL;
1578                 writeq(val64, &bar0->rts_qos_steering);
1579                 break;
1580         case 2:
1581                 val64 = 0x0001000100010001ULL;
1582                 writeq(val64, &bar0->rx_w_round_robin_0);
1583                 writeq(val64, &bar0->rx_w_round_robin_1);
1584                 writeq(val64, &bar0->rx_w_round_robin_2);
1585                 writeq(val64, &bar0->rx_w_round_robin_3);
1586                 val64 = 0x0001000100000000ULL;
1587                 writeq(val64, &bar0->rx_w_round_robin_4);
1588
1589                 val64 = 0x8080808040404040ULL;
1590                 writeq(val64, &bar0->rts_qos_steering);
1591                 break;
1592         case 3:
1593                 val64 = 0x0001020001020001ULL;
1594                 writeq(val64, &bar0->rx_w_round_robin_0);
1595                 val64 = 0x0200010200010200ULL;
1596                 writeq(val64, &bar0->rx_w_round_robin_1);
1597                 val64 = 0x0102000102000102ULL;
1598                 writeq(val64, &bar0->rx_w_round_robin_2);
1599                 val64 = 0x0001020001020001ULL;
1600                 writeq(val64, &bar0->rx_w_round_robin_3);
1601                 val64 = 0x0200010200000000ULL;
1602                 writeq(val64, &bar0->rx_w_round_robin_4);
1603
1604                 val64 = 0x8080804040402020ULL;
1605                 writeq(val64, &bar0->rts_qos_steering);
1606                 break;
1607         case 4:
1608                 val64 = 0x0001020300010203ULL;
1609                 writeq(val64, &bar0->rx_w_round_robin_0);
1610                 writeq(val64, &bar0->rx_w_round_robin_1);
1611                 writeq(val64, &bar0->rx_w_round_robin_2);
1612                 writeq(val64, &bar0->rx_w_round_robin_3);
1613                 val64 = 0x0001020300000000ULL;
1614                 writeq(val64, &bar0->rx_w_round_robin_4);
1615
1616                 val64 = 0x8080404020201010ULL;
1617                 writeq(val64, &bar0->rts_qos_steering);
1618                 break;
1619         case 5:
1620                 val64 = 0x0001020304000102ULL;
1621                 writeq(val64, &bar0->rx_w_round_robin_0);
1622                 val64 = 0x0304000102030400ULL;
1623                 writeq(val64, &bar0->rx_w_round_robin_1);
1624                 val64 = 0x0102030400010203ULL;
1625                 writeq(val64, &bar0->rx_w_round_robin_2);
1626                 val64 = 0x0400010203040001ULL;
1627                 writeq(val64, &bar0->rx_w_round_robin_3);
1628                 val64 = 0x0203040000000000ULL;
1629                 writeq(val64, &bar0->rx_w_round_robin_4);
1630
1631                 val64 = 0x8080404020201008ULL;
1632                 writeq(val64, &bar0->rts_qos_steering);
1633                 break;
1634         case 6:
1635                 val64 = 0x0001020304050001ULL;
1636                 writeq(val64, &bar0->rx_w_round_robin_0);
1637                 val64 = 0x0203040500010203ULL;
1638                 writeq(val64, &bar0->rx_w_round_robin_1);
1639                 val64 = 0x0405000102030405ULL;
1640                 writeq(val64, &bar0->rx_w_round_robin_2);
1641                 val64 = 0x0001020304050001ULL;
1642                 writeq(val64, &bar0->rx_w_round_robin_3);
1643                 val64 = 0x0203040500000000ULL;
1644                 writeq(val64, &bar0->rx_w_round_robin_4);
1645
1646                 val64 = 0x8080404020100804ULL;
1647                 writeq(val64, &bar0->rts_qos_steering);
1648                 break;
1649         case 7:
1650                 val64 = 0x0001020304050600ULL;
1651                 writeq(val64, &bar0->rx_w_round_robin_0);
1652                 val64 = 0x0102030405060001ULL;
1653                 writeq(val64, &bar0->rx_w_round_robin_1);
1654                 val64 = 0x0203040506000102ULL;
1655                 writeq(val64, &bar0->rx_w_round_robin_2);
1656                 val64 = 0x0304050600010203ULL;
1657                 writeq(val64, &bar0->rx_w_round_robin_3);
1658                 val64 = 0x0405060000000000ULL;
1659                 writeq(val64, &bar0->rx_w_round_robin_4);
1660
1661                 val64 = 0x8080402010080402ULL;
1662                 writeq(val64, &bar0->rts_qos_steering);
1663                 break;
1664         case 8:
1665                 val64 = 0x0001020304050607ULL;
1666                 writeq(val64, &bar0->rx_w_round_robin_0);
1667                 writeq(val64, &bar0->rx_w_round_robin_1);
1668                 writeq(val64, &bar0->rx_w_round_robin_2);
1669                 writeq(val64, &bar0->rx_w_round_robin_3);
1670                 val64 = 0x0001020300000000ULL;
1671                 writeq(val64, &bar0->rx_w_round_robin_4);
1672
1673                 val64 = 0x8040201008040201ULL;
1674                 writeq(val64, &bar0->rts_qos_steering);
1675                 break;
1676         }
1677
1678         /* UDP Fix */
1679         val64 = 0;
1680         for (i = 0; i < 8; i++)
1681                 writeq(val64, &bar0->rts_frm_len_n[i]);
1682
1683         /* Set the default rts frame length for the rings configured */
1684         val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1685         for (i = 0 ; i < config->rx_ring_num ; i++)
1686                 writeq(val64, &bar0->rts_frm_len_n[i]);
1687
1688         /* Set the frame length for the configured rings
1689          * desired by the user
1690          */
1691         for (i = 0; i < config->rx_ring_num; i++) {
1692                 /* If rts_frm_len[i] == 0 then it is assumed that user not
1693                  * specified frame length steering.
1694                  * If the user provides the frame length then program
1695                  * the rts_frm_len register for those values or else
1696                  * leave it as it is.
1697                  */
1698                 if (rts_frm_len[i] != 0) {
1699                         writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1700                                 &bar0->rts_frm_len_n[i]);
1701                 }
1702         }
1703
1704         /* Disable differentiated services steering logic */
1705         for (i = 0; i < 64; i++) {
1706                 if (rts_ds_steer(nic, i, 0) == FAILURE) {
1707                         DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
1708                                 dev->name);
1709                         DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
1710                         return -ENODEV;
1711                 }
1712         }
1713
1714         /* Program statistics memory */
1715         writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1716
1717         if (nic->device_type == XFRAME_II_DEVICE) {
1718                 val64 = STAT_BC(0x320);
1719                 writeq(val64, &bar0->stat_byte_cnt);
1720         }
1721
1722         /*
1723          * Initializing the sampling rate for the device to calculate the
1724          * bandwidth utilization.
1725          */
1726         val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1727             MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1728         writeq(val64, &bar0->mac_link_util);
1729
1730         /*
1731          * Initializing the Transmit and Receive Traffic Interrupt
1732          * Scheme.
1733          */
1734
1735         /* Initialize TTI */
1736         if (SUCCESS != init_tti(nic, nic->last_link_state))
1737                 return -ENODEV;
1738
1739         /* RTI Initialization */
1740         if (nic->device_type == XFRAME_II_DEVICE) {
1741                 /*
1742                  * Programmed to generate Apprx 500 Intrs per
1743                  * second
1744                  */
1745                 int count = (nic->config.bus_speed * 125)/4;
1746                 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1747         } else
1748                 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1749         val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1750                  RTI_DATA1_MEM_RX_URNG_B(0x10) |
1751                  RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1752
1753         writeq(val64, &bar0->rti_data1_mem);
1754
1755         val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1756                 RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1757         if (nic->config.intr_type == MSI_X)
1758             val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1759                         RTI_DATA2_MEM_RX_UFC_D(0x40));
1760         else
1761             val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1762                         RTI_DATA2_MEM_RX_UFC_D(0x80));
1763         writeq(val64, &bar0->rti_data2_mem);
1764
1765         for (i = 0; i < config->rx_ring_num; i++) {
1766                 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1767                                 | RTI_CMD_MEM_OFFSET(i);
1768                 writeq(val64, &bar0->rti_command_mem);
1769
1770                 /*
1771                  * Once the operation completes, the Strobe bit of the
1772                  * command register will be reset. We poll for this
1773                  * particular condition. We wait for a maximum of 500ms
1774                  * for the operation to complete, if it's not complete
1775                  * by then we return error.
1776                  */
1777                 time = 0;
1778                 while (TRUE) {
1779                         val64 = readq(&bar0->rti_command_mem);
1780                         if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
1781                                 break;
1782
1783                         if (time > 10) {
1784                                 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1785                                           dev->name);
1786                                 return -ENODEV;
1787                         }
1788                         time++;
1789                         msleep(50);
1790                 }
1791         }
1792
1793         /*
1794          * Initializing proper values as Pause threshold into all
1795          * the 8 Queues on Rx side.
1796          */
1797         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1798         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1799
1800         /* Disable RMAC PAD STRIPPING */
1801         add = &bar0->mac_cfg;
1802         val64 = readq(&bar0->mac_cfg);
1803         val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1804         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1805         writel((u32) (val64), add);
1806         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1807         writel((u32) (val64 >> 32), (add + 4));
1808         val64 = readq(&bar0->mac_cfg);
1809
1810         /* Enable FCS stripping by adapter */
1811         add = &bar0->mac_cfg;
1812         val64 = readq(&bar0->mac_cfg);
1813         val64 |= MAC_CFG_RMAC_STRIP_FCS;
1814         if (nic->device_type == XFRAME_II_DEVICE)
1815                 writeq(val64, &bar0->mac_cfg);
1816         else {
1817                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1818                 writel((u32) (val64), add);
1819                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1820                 writel((u32) (val64 >> 32), (add + 4));
1821         }
1822
1823         /*
1824          * Set the time value to be inserted in the pause frame
1825          * generated by xena.
1826          */
1827         val64 = readq(&bar0->rmac_pause_cfg);
1828         val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1829         val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1830         writeq(val64, &bar0->rmac_pause_cfg);
1831
1832         /*
1833          * Set the Threshold Limit for Generating the pause frame
1834          * If the amount of data in any Queue exceeds ratio of
1835          * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1836          * pause frame is generated
1837          */
1838         val64 = 0;
1839         for (i = 0; i < 4; i++) {
1840                 val64 |=
1841                     (((u64) 0xFF00 | nic->mac_control.
1842                       mc_pause_threshold_q0q3)
1843                      << (i * 2 * 8));
1844         }
1845         writeq(val64, &bar0->mc_pause_thresh_q0q3);
1846
1847         val64 = 0;
1848         for (i = 0; i < 4; i++) {
1849                 val64 |=
1850                     (((u64) 0xFF00 | nic->mac_control.
1851                       mc_pause_threshold_q4q7)
1852                      << (i * 2 * 8));
1853         }
1854         writeq(val64, &bar0->mc_pause_thresh_q4q7);
1855
1856         /*
1857          * TxDMA will stop Read request if the number of read split has
1858          * exceeded the limit pointed by shared_splits
1859          */
1860         val64 = readq(&bar0->pic_control);
1861         val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1862         writeq(val64, &bar0->pic_control);
1863
1864         if (nic->config.bus_speed == 266) {
1865                 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1866                 writeq(0x0, &bar0->read_retry_delay);
1867                 writeq(0x0, &bar0->write_retry_delay);
1868         }
1869
1870         /*
1871          * Programming the Herc to split every write transaction
1872          * that does not start on an ADB to reduce disconnects.
1873          */
1874         if (nic->device_type == XFRAME_II_DEVICE) {
1875                 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1876                         MISC_LINK_STABILITY_PRD(3);
1877                 writeq(val64, &bar0->misc_control);
1878                 val64 = readq(&bar0->pic_control2);
1879                 val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
1880                 writeq(val64, &bar0->pic_control2);
1881         }
1882         if (strstr(nic->product_name, "CX4")) {
1883                 val64 = TMAC_AVG_IPG(0x17);
1884                 writeq(val64, &bar0->tmac_avg_ipg);
1885         }
1886
1887         return SUCCESS;
1888 }
1889 #define LINK_UP_DOWN_INTERRUPT          1
1890 #define MAC_RMAC_ERR_TIMER              2
1891
1892 static int s2io_link_fault_indication(struct s2io_nic *nic)
1893 {
1894         if (nic->device_type == XFRAME_II_DEVICE)
1895                 return LINK_UP_DOWN_INTERRUPT;
1896         else
1897                 return MAC_RMAC_ERR_TIMER;
1898 }
1899
1900 /**
1901  *  do_s2io_write_bits -  update alarm bits in alarm register
1902  *  @value: alarm bits
1903  *  @flag: interrupt status
1904  *  @addr: address value
1905  *  Description: update alarm bits in alarm register
1906  *  Return Value:
1907  *  NONE.
1908  */
1909 static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1910 {
1911         u64 temp64;
1912
1913         temp64 = readq(addr);
1914
1915         if(flag == ENABLE_INTRS)
1916                 temp64 &= ~((u64) value);
1917         else
1918                 temp64 |= ((u64) value);
1919         writeq(temp64, addr);
1920 }
1921
/**
 *  en_dis_err_alarms - mask or unmask error alarm interrupt sources
 *  @nic: device private variable
 *  @mask: bitmask selecting which interrupt blocks to act on
 *         (TX_DMA_INTR, TX_MAC_INTR, TX_XGXS_INTR, RX_DMA_INTR,
 *         RX_MAC_INTR, RX_XGXS_INTR, MC_INTR)
 *  @flag: ENABLE_INTRS or DISABLE_INTRS (passed through to
 *         do_s2io_write_bits())
 *  Description: For each block selected in @mask, reprogram that block's
 *  error-mask registers and accumulate the block's top-level interrupt
 *  bit into a local mask. All interrupts are disabled at the top level
 *  while the per-block masks are being rewritten; the accumulated mask
 *  is cached in nic->general_int_mask for en_dis_able_nic_intrs() to
 *  apply later (currently forced to 0 — see the note at the end).
 *  Return Value: NONE.
 */
static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 gen_int_mask = 0;
	u64 interruptible;

	/* Gate everything off while the per-block masks are updated */
	writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask);

	/* TxDMA block: TDA/PFC/PCC/TTI/LSO/TPA/SM error sources */
	if (mask & TX_DMA_INTR) {

		gen_int_mask |= TXDMA_INT_M;

		do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
				TXDMA_PCC_INT | TXDMA_TTI_INT |
				TXDMA_LSO_INT | TXDMA_TPA_INT |
				TXDMA_SM_INT, flag, &bar0->txdma_int_mask);

		do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
				PFC_MISC_0_ERR | PFC_MISC_1_ERR |
				PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
				&bar0->pfc_err_mask);

		do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
				TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
				TDA_PCIX_ERR, flag, &bar0->tda_err_mask);

		do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
				PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
				PCC_N_SERR | PCC_6_COF_OV_ERR |
				PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
				PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
				PCC_TXB_ECC_SG_ERR, flag, &bar0->pcc_err_mask);

		do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
				TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);

		do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
				LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
				LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
				flag, &bar0->lso_err_mask);

		do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
				flag, &bar0->tpa_err_mask);

		do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);

	}

	/* Tx MAC (TMAC) error sources */
	if (mask & TX_MAC_INTR) {
		gen_int_mask |= TXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
				&bar0->mac_int_mask);
		do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
				TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
				TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
				flag, &bar0->mac_tmac_err_mask);
	}

	/* Tx XGXS (serdes) error sources */
	if (mask & TX_XGXS_INTR) {
		gen_int_mask |= TXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
				&bar0->xgxs_int_mask);
		do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
				TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
				flag, &bar0->xgxs_txgxs_err_mask);
	}

	/* RxDMA block: RC/PRC/RPA/RDA/RTI error sources */
	if (mask & RX_DMA_INTR) {
		gen_int_mask |= RXDMA_INT_M;
		do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
				RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
				flag, &bar0->rxdma_int_mask);
		do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
				RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
				RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
				RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
		do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
				PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
				PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
				&bar0->prc_pcix_err_mask);
		do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
				RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
				&bar0->rpa_err_mask);
		do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
				RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
				RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
				RDA_FRM_ECC_SG_ERR | RDA_MISC_ERR|RDA_PCIX_ERR,
				flag, &bar0->rda_err_mask);
		do_s2io_write_bits(RTI_SM_ERR_ALARM |
				RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
				flag, &bar0->rti_err_mask);
	}

	/* Rx MAC (RMAC) error sources; the link-state-change source is
	 * only routed here when link faults are reported via the RMAC
	 * error timer (Xframe I) rather than a dedicated interrupt. */
	if (mask & RX_MAC_INTR) {
		gen_int_mask |= RXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
				&bar0->mac_int_mask);
		interruptible = RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
				RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
				RMAC_DOUBLE_ECC_ERR;
		if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER)
			interruptible |= RMAC_LINK_STATE_CHANGE_INT;
		do_s2io_write_bits(interruptible,
				flag, &bar0->mac_rmac_err_mask);
	}

	/* Rx XGXS (serdes) error sources */
	if (mask & RX_XGXS_INTR)
	{
		gen_int_mask |= RXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
				&bar0->xgxs_int_mask);
		do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
				&bar0->xgxs_rxgxs_err_mask);
	}

	/* Memory controller (XFB) error sources */
	if (mask & MC_INTR) {
		gen_int_mask |= MC_INT_M;
		do_s2io_write_bits(MC_INT_MASK_MC_INT, flag, &bar0->mc_int_mask);
		do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
				MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
				&bar0->mc_err_mask);
	}
	nic->general_int_mask = gen_int_mask;

	/* Remove this line when alarm interrupts are enabled */
	nic->general_int_mask = 0;
}
/**
 *  en_dis_able_nic_intrs - Enable or Disable the interrupts
 *  @nic: device private variable,
 *  @mask: A mask indicating which Intr block must be modified and,
 *  @flag: A flag indicating whether to enable or disable the Intrs.
 *  Description: This function will either disable or enable the interrupts
 *  depending on the flag argument. The mask argument can be used to
 *  enable/disable any Intr block. The per-block traffic masks are
 *  programmed first, then the top-level general_int_mask is updated in
 *  one shot; the resulting hardware value is read back into
 *  nic->general_int_mask.
 *  Return Value: NONE.
 */

static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 temp64 = 0, intr_mask = 0;

	/* Start from the alarm bits cached by en_dis_err_alarms() */
	intr_mask = nic->general_int_mask;

	/*  Top level interrupt classification */
	/*  PIC Interrupts */
	if (mask & TX_PIC_INTR) {
		/*  Enable PIC Intrs in the general intr mask register */
		intr_mask |= TXPIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/*
			 * If Hercules adapter enable GPIO otherwise
			 * disable all PCIX, Flash, MDIO, IIC and GPIO
			 * interrupts for now.
			 * TODO
			 */
			if (s2io_link_fault_indication(nic) ==
					LINK_UP_DOWN_INTERRUPT ) {
				/* Herc: unmask only the GPIO link-up source */
				do_s2io_write_bits(PIC_INT_GPIO, flag,
						&bar0->pic_int_mask);
				do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
						&bar0->gpio_int_mask);
			} else
				writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable PIC Intrs in the general
			 * intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
		}
	}

	/*  Tx traffic interrupts */
	if (mask & TX_TRAFFIC_INTR) {
		intr_mask |= TXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/*
			 * Enable all the Tx side interrupts
			 * writing 0 Enables all 64 TX interrupt levels
			 */
			writeq(0x0, &bar0->tx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Tx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
		}
	}

	/*  Rx traffic interrupts */
	if (mask & RX_TRAFFIC_INTR) {
		intr_mask |= RXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/* writing 0 Enables all 8 RX interrupt levels */
			writeq(0x0, &bar0->rx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Rx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
		}
	}

	/* Apply the accumulated mask at the top level: enabling clears
	 * the selected bits, disabling masks everything. */
	temp64 = readq(&bar0->general_int_mask);
	if (flag == ENABLE_INTRS)
		temp64 &= ~((u64) intr_mask);
	else
		temp64 = DISABLE_ALL_INTRS;
	writeq(temp64, &bar0->general_int_mask);

	/* Cache the value the hardware actually holds */
	nic->general_int_mask = readq(&bar0->general_int_mask);
}
2137
2138 /**
2139  *  verify_pcc_quiescent- Checks for PCC quiescent state
2140  *  Return: 1 If PCC is quiescence
2141  *          0 If PCC is not quiescence
2142  */
2143 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
2144 {
2145         int ret = 0, herc;
2146         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2147         u64 val64 = readq(&bar0->adapter_status);
2148
2149         herc = (sp->device_type == XFRAME_II_DEVICE);
2150
2151         if (flag == FALSE) {
2152                 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2153                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
2154                                 ret = 1;
2155                 } else {
2156                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2157                                 ret = 1;
2158                 }
2159         } else {
2160                 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2161                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
2162                              ADAPTER_STATUS_RMAC_PCC_IDLE))
2163                                 ret = 1;
2164                 } else {
2165                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
2166                              ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2167                                 ret = 1;
2168                 }
2169         }
2170
2171         return ret;
2172 }
2173 /**
2174  *  verify_xena_quiescence - Checks whether the H/W is ready
2175  *  Description: Returns whether the H/W is ready to go or not. Depending
2176  *  on whether adapter enable bit was written or not the comparison
2177  *  differs and the calling function passes the input argument flag to
2178  *  indicate this.
 *  Return: 1 if Xena is quiescent
 *          0 if Xena is not quiescent
2181  */
2182
2183 static int verify_xena_quiescence(struct s2io_nic *sp)
2184 {
2185         int  mode;
2186         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2187         u64 val64 = readq(&bar0->adapter_status);
2188         mode = s2io_verify_pci_mode(sp);
2189
2190         if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2191                 DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
2192                 return 0;
2193         }
2194         if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2195         DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
2196                 return 0;
2197         }
2198         if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2199                 DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
2200                 return 0;
2201         }
2202         if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2203                 DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
2204                 return 0;
2205         }
2206         if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2207                 DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
2208                 return 0;
2209         }
2210         if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2211                 DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
2212                 return 0;
2213         }
2214         if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2215                 DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
2216                 return 0;
2217         }
2218         if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2219                 DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
2220                 return 0;
2221         }
2222
2223         /*
2224          * In PCI 33 mode, the P_PLL is not used, and therefore,
2225          * the the P_PLL_LOCK bit in the adapter_status register will
2226          * not be asserted.
2227          */
2228         if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2229                 sp->device_type == XFRAME_II_DEVICE && mode !=
2230                 PCI_MODE_PCI_33) {
2231                 DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
2232                 return 0;
2233         }
2234         if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2235                         ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2236                 DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
2237                 return 0;
2238         }
2239         return 1;
2240 }
2241
2242 /**
2243  * fix_mac_address -  Fix for Mac addr problem on Alpha platforms
 * @sp: Pointer to device specific structure
2245  * Description :
2246  * New procedure to clear mac address reading  problems on Alpha platforms
2247  *
2248  */
2249
2250 static void fix_mac_address(struct s2io_nic * sp)
2251 {
2252         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2253         u64 val64;
2254         int i = 0;
2255
2256         while (fix_mac[i] != END_SIGN) {
2257                 writeq(fix_mac[i++], &bar0->gpio_control);
2258                 udelay(10);
2259                 val64 = readq(&bar0->gpio_control);
2260         }
2261 }
2262
2263 /**
2264  *  start_nic - Turns the device on
2265  *  @nic : device private variable.
2266  *  Description:
2267  *  This function actually turns the device on. Before this  function is
2268  *  called,all Registers are configured from their reset states
2269  *  and shared memory is allocated but the NIC is still quiescent. On
2270  *  calling this function, the device interrupts are cleared and the NIC is
2271  *  literally switched on by writing into the adapter control register.
2272  *  Return Value:
2273  *  SUCCESS on success and -1 on failure.
2274  */
2275
static int start_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	u16 subid, i;
	struct mac_info *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/*  PRC Initialization and configuration */
	for (i = 0; i < config->rx_ring_num; i++) {
		/* Point the PRC at the first Rx block of this ring. */
		writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
		       &bar0->prc_rxd0_n[i]);

		val64 = readq(&bar0->prc_ctrl_n[i]);
		if (nic->rxd_mode == RXD_MODE_1)
			val64 |= PRC_CTRL_RC_ENABLED;
		else
			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
		if (nic->device_type == XFRAME_II_DEVICE)
			val64 |= PRC_CTRL_GROUP_READS;
		/* Replace the RxD backoff interval with a fixed 0x1000. */
		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
		writeq(val64, &bar0->prc_ctrl_n[i]);
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
		writeq(val64, &bar0->rx_pa_cfg);
	}

	if (vlan_tag_strip == 0) {
		/* Module parameter asked us not to strip VLAN tags. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
		writeq(val64, &bar0->rx_pa_cfg);
		vlan_strip_flag = 0;
	}

	/*
	 * Enabling MC-RLDRAM. After enabling the device, we timeout
	 * for around 100ms, which is approximately the time required
	 * for the device to be ready for operation.
	 */
	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
	val64 = readq(&bar0->mc_rldram_mrs);

	msleep(100);	/* Delay by around 100 ms. */

	/*
	 * NOTE(review): this CLEARS ADAPTER_ECC_EN although the original
	 * comment said "Enabling ECC Protection" -- confirm the intended
	 * polarity of this bit against the Xframe register specification.
	 */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/*
	 * Verify if the device is ready to be enabled, if so enable
	 * it.
	 */
	val64 = readq(&bar0->adapter_status);
	if (!verify_xena_quiescence(nic)) {
		DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
		DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
			  (unsigned long long) val64);
		return FAILURE;
	}

	/*
	 * With some switches, link might be already up at this point.
	 * Because of this weird behavior, when we enable laser,
	 * we may not get link. We need to handle this. We cannot
	 * figure out which switch is misbehaving. So we are forced to
	 * make a global change.
	 */

	/* Enabling Laser. */
	val64 = readq(&bar0->adapter_control);
	val64 |= ADAPTER_EOI_TX_ON;
	writeq(val64, &bar0->adapter_control);

	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Don't see link state interrupts initially on some switches,
		 * so directly scheduling the link state task here.
		 */
		schedule_work(&nic->set_link_task);
	}
	/* SXE-002: Initialize link and activity LED */
	subid = nic->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (nic->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		/*
		 * NOTE(review): magic write at BAR0 + 0x2700 for the LED
		 * workaround -- register meaning is not visible here.
		 */
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	return SUCCESS;
}
2381 /**
2382  * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2383  */
2384 static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, struct \
2385                                         TxD *txdlp, int get_off)
2386 {
2387         struct s2io_nic *nic = fifo_data->nic;
2388         struct sk_buff *skb;
2389         struct TxD *txds;
2390         u16 j, frg_cnt;
2391
2392         txds = txdlp;
2393         if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
2394                 pci_unmap_single(nic->pdev, (dma_addr_t)
2395                         txds->Buffer_Pointer, sizeof(u64),
2396                         PCI_DMA_TODEVICE);
2397                 txds++;
2398         }
2399
2400         skb = (struct sk_buff *) ((unsigned long)
2401                         txds->Host_Control);
2402         if (!skb) {
2403                 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2404                 return NULL;
2405         }
2406         pci_unmap_single(nic->pdev, (dma_addr_t)
2407                          txds->Buffer_Pointer,
2408                          skb->len - skb->data_len,
2409                          PCI_DMA_TODEVICE);
2410         frg_cnt = skb_shinfo(skb)->nr_frags;
2411         if (frg_cnt) {
2412                 txds++;
2413                 for (j = 0; j < frg_cnt; j++, txds++) {
2414                         skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2415                         if (!txds->Buffer_Pointer)
2416                                 break;
2417                         pci_unmap_page(nic->pdev, (dma_addr_t)
2418                                         txds->Buffer_Pointer,
2419                                        frag->size, PCI_DMA_TODEVICE);
2420                 }
2421         }
2422         memset(txdlp,0, (sizeof(struct TxD) * fifo_data->max_txds));
2423         return(skb);
2424 }
2425
2426 /**
2427  *  free_tx_buffers - Free all queued Tx buffers
2428  *  @nic : device private variable.
2429  *  Description:
2430  *  Free all queued Tx buffers.
2431  *  Return Value: void
2432 */
2433
2434 static void free_tx_buffers(struct s2io_nic *nic)
2435 {
2436         struct net_device *dev = nic->dev;
2437         struct sk_buff *skb;
2438         struct TxD *txdp;
2439         int i, j;
2440         struct mac_info *mac_control;
2441         struct config_param *config;
2442         int cnt = 0;
2443
2444         mac_control = &nic->mac_control;
2445         config = &nic->config;
2446
2447         for (i = 0; i < config->tx_fifo_num; i++) {
2448                 unsigned long flags;
2449                 spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags);
2450                 for (j = 0; j < config->tx_cfg[i].fifo_len; j++) {
2451                         txdp = (struct TxD *) \
2452                         mac_control->fifos[i].list_info[j].list_virt_addr;
2453                         skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2454                         if (skb) {
2455                                 nic->mac_control.stats_info->sw_stat.mem_freed
2456                                         += skb->truesize;
2457                                 dev_kfree_skb(skb);
2458                                 cnt++;
2459                         }
2460                 }
2461                 DBG_PRINT(INTR_DBG,
2462                           "%s:forcibly freeing %d skbs on FIFO%d\n",
2463                           dev->name, cnt, i);
2464                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2465                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2466                 spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock, flags);
2467         }
2468 }
2469
2470 /**
2471  *   stop_nic -  To stop the nic
 *   @nic : device private variable.
2473  *   Description:
2474  *   This function does exactly the opposite of what the start_nic()
2475  *   function does. This function is called to stop the device.
2476  *   Return Value:
2477  *   void.
2478  */
2479
2480 static void stop_nic(struct s2io_nic *nic)
2481 {
2482         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2483         register u64 val64 = 0;
2484         u16 interruptible;
2485         struct mac_info *mac_control;
2486         struct config_param *config;
2487
2488         mac_control = &nic->mac_control;
2489         config = &nic->config;
2490
2491         /*  Disable all interrupts */
2492         en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2493         interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2494         interruptible |= TX_PIC_INTR;
2495         en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2496
2497         /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2498         val64 = readq(&bar0->adapter_control);
2499         val64 &= ~(ADAPTER_CNTL_EN);
2500         writeq(val64, &bar0->adapter_control);
2501 }
2502
2503 /**
2504  *  fill_rx_buffers - Allocates the Rx side skbs
2505  *  @ring_info: per ring structure
2506  *  @from_card_up: If this is true, we will map the buffer to get
2507  *     the dma address for buf0 and buf1 to give it to the card.
2508  *     Else we will sync the already mapped buffer to give it to the card.
2509  *  Description:
2510  *  The function allocates Rx side skbs and puts the physical
2511  *  address of these buffers into the RxD buffer pointers, so that the NIC
2512  *  can DMA the received frame into these locations.
2513  *  The NIC supports 3 receive modes, viz
2514  *  1. single buffer,
2515  *  2. three buffer and
2516  *  3. Five buffer modes.
2517  *  Each mode defines how many fragments the received frame will be split
2518  *  up into by the NIC. The frame is split into L3 header, L4 Header,
2519  *  L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2520  *  is split into 3 fragments. As of now only single buffer mode is
2521  *  supported.
2522  *   Return Value:
2523  *  SUCCESS on success or an appropriate -ve value on failure.
2524  */
2525
static int fill_rx_buffers(struct ring_info *ring, int from_card_up)
{
	struct sk_buff *skb;
	struct RxD_t *rxdp;
	int off, size, block_no, block_no1;
	u32 alloc_tab = 0;
	u32 alloc_cnt;
	u64 tmp;
	struct buffAdd *ba;
	struct RxD_t *first_rxdp = NULL;
	u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
	int rxd_index = 0;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;
	struct swStat *stats = &ring->nic->mac_control.stats_info->sw_stat;

	/* Number of descriptors that still need a fresh buffer. */
	alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;

	block_no1 = ring->rx_curr_get_info.block_index;
	while (alloc_tab < alloc_cnt) {
		block_no = ring->rx_curr_put_info.block_index;

		off = ring->rx_curr_put_info.offset;

		rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;

		rxd_index = off + 1;
		if (block_no)
			rxd_index += (block_no * ring->rxd_count);

		/*
		 * Stop when the put pointer catches up with the get pointer
		 * while this descriptor still has a buffer attached.
		 */
		if ((block_no == block_no1) &&
			(off == ring->rx_curr_get_info.offset) &&
			(rxdp->Host_Control)) {
			DBG_PRINT(INTR_DBG, "%s: Get and Put",
				ring->dev->name);
			DBG_PRINT(INTR_DBG, " info equated\n");
			goto end;
		}
		/* Wrap to the next descriptor block when this one is full. */
		if (off && (off == ring->rxd_count)) {
			ring->rx_curr_put_info.block_index++;
			if (ring->rx_curr_put_info.block_index ==
							ring->block_count)
				ring->rx_curr_put_info.block_index = 0;
			block_no = ring->rx_curr_put_info.block_index;
			off = 0;
			ring->rx_curr_put_info.offset = off;
			rxdp = ring->rx_blocks[block_no].block_virt_addr;
			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
				  ring->dev->name, rxdp);

		}

		/* Descriptor is still owned by the NIC -- nothing to do. */
		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
			((ring->rxd_mode == RXD_MODE_3B) &&
				(rxdp->Control_2 & s2BIT(0)))) {
			ring->rx_curr_put_info.offset = off;
			goto end;
		}
		/* calculate size of skb based on ring mode */
		size = ring->mtu + HEADER_ETHERNET_II_802_3_SIZE +
				HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
		if (ring->rxd_mode == RXD_MODE_1)
			size += NET_IP_ALIGN;
		else
			size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;

		/* allocate skb */
		skb = dev_alloc_skb(size);
		if(!skb) {
			DBG_PRINT(INFO_DBG, "%s: Out of ", ring->dev->name);
			DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n");
			/*
			 * Hand any already-prepared batch to the NIC before
			 * bailing out, so earlier work is not lost.
			 */
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			stats->mem_alloc_fail_cnt++;

			return -ENOMEM ;
		}
		stats->mem_allocated += skb->truesize;

		if (ring->rxd_mode == RXD_MODE_1) {
			/* 1 buffer mode - normal operation mode */
			rxdp1 = (struct RxD1*)rxdp;
			memset(rxdp, 0, sizeof(struct RxD1));
			skb_reserve(skb, NET_IP_ALIGN);
			rxdp1->Buffer0_ptr = pci_map_single
			    (ring->pdev, skb->data, size - NET_IP_ALIGN,
				PCI_DMA_FROMDEVICE);
			if(pci_dma_mapping_error(rxdp1->Buffer0_ptr))
				goto pci_map_failed;

			rxdp->Control_2 =
				SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
			rxdp->Host_Control = (unsigned long) (skb);
		} else if (ring->rxd_mode == RXD_MODE_3B) {
			/*
			 * 2 buffer mode -
			 * 2 buffer mode provides 128
			 * byte aligned receive buffers.
			 */

			rxdp3 = (struct RxD3*)rxdp;
			/* save buffer pointers to avoid frequent dma mapping */
			Buffer0_ptr = rxdp3->Buffer0_ptr;
			Buffer1_ptr = rxdp3->Buffer1_ptr;
			memset(rxdp, 0, sizeof(struct RxD3));
			/* restore the buffer pointers for dma sync*/
			rxdp3->Buffer0_ptr = Buffer0_ptr;
			rxdp3->Buffer1_ptr = Buffer1_ptr;

			ba = &ring->ba[block_no][off];
			skb_reserve(skb, BUF0_LEN);
			/* Round skb->data up to the next ALIGN_SIZE boundary. */
			tmp = (u64)(unsigned long) skb->data;
			tmp += ALIGN_SIZE;
			tmp &= ~ALIGN_SIZE;
			skb->data = (void *) (unsigned long)tmp;
			skb_reset_tail_pointer(skb);

			/*
			 * On first fill (card up) map buf0 fresh; afterwards
			 * only sync the existing mapping back to the device.
			 */
			if (from_card_up) {
				rxdp3->Buffer0_ptr =
				   pci_map_single(ring->pdev, ba->ba_0,
					BUF0_LEN, PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(rxdp3->Buffer0_ptr))
					goto pci_map_failed;
			} else
				pci_dma_sync_single_for_device(ring->pdev,
				(dma_addr_t) rxdp3->Buffer0_ptr,
				    BUF0_LEN, PCI_DMA_FROMDEVICE);

			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
			if (ring->rxd_mode == RXD_MODE_3B) {
				/* Two buffer mode */

				/*
				 * Buffer2 will have L3/L4 header plus
				 * L4 payload
				 */
				rxdp3->Buffer2_ptr = pci_map_single
				(ring->pdev, skb->data, ring->mtu + 4,
						PCI_DMA_FROMDEVICE);

				if (pci_dma_mapping_error(rxdp3->Buffer2_ptr))
					goto pci_map_failed;

				if (from_card_up) {
					rxdp3->Buffer1_ptr =
						pci_map_single(ring->pdev,
						ba->ba_1, BUF1_LEN,
						PCI_DMA_FROMDEVICE);

					/* Undo the Buffer2 mapping on failure. */
					if (pci_dma_mapping_error
						(rxdp3->Buffer1_ptr)) {
						pci_unmap_single
							(ring->pdev,
						    (dma_addr_t)(unsigned long)
							skb->data,
							ring->mtu + 4,
							PCI_DMA_FROMDEVICE);
						goto pci_map_failed;
					}
				}
				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
								(ring->mtu + 4);
			}
			rxdp->Control_2 |= s2BIT(0);
			rxdp->Host_Control = (unsigned long) (skb);
		}
		/* Give ownership to the NIC immediately, except for batch heads. */
		if (alloc_tab & ((1 << rxsync_frequency) - 1))
			rxdp->Control_1 |= RXD_OWN_XENA;
		off++;
		if (off == (ring->rxd_count + 1))
			off = 0;
		ring->rx_curr_put_info.offset = off;

		rxdp->Control_2 |= SET_RXD_MARKER;
		/*
		 * At each rxsync_frequency boundary, release the previous
		 * batch head to the NIC and remember this descriptor as the
		 * head of the new batch.
		 */
		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			first_rxdp = rxdp;
		}
		ring->rx_bufs_left += 1;
		alloc_tab++;
	}

      end:
	/* Transfer ownership of first descriptor to adapter just before
	 * exiting. Before that, use memory barrier so that ownership
	 * and other fields are seen by adapter correctly.
	 */
	if (first_rxdp) {
		wmb();
		first_rxdp->Control_1 |= RXD_OWN_XENA;
	}

	return SUCCESS;
pci_map_failed:
	stats->pci_map_fail_cnt++;
	stats->mem_freed += skb->truesize;
	dev_kfree_skb_irq(skb);
	return -ENOMEM;
}
2731
2732 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2733 {
2734         struct net_device *dev = sp->dev;
2735         int j;
2736         struct sk_buff *skb;
2737         struct RxD_t *rxdp;
2738         struct mac_info *mac_control;
2739         struct buffAdd *ba;
2740         struct RxD1 *rxdp1;
2741         struct RxD3 *rxdp3;
2742
2743         mac_control = &sp->mac_control;
2744         for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2745                 rxdp = mac_control->rings[ring_no].
2746                                 rx_blocks[blk].rxds[j].virt_addr;
2747                 skb = (struct sk_buff *)
2748                         ((unsigned long) rxdp->Host_Control);
2749                 if (!skb) {
2750                         continue;
2751                 }
2752                 if (sp->rxd_mode == RXD_MODE_1) {
2753                         rxdp1 = (struct RxD1*)rxdp;
2754                         pci_unmap_single(sp->pdev, (dma_addr_t)
2755                                 rxdp1->Buffer0_ptr,
2756                                 dev->mtu +
2757                                 HEADER_ETHERNET_II_802_3_SIZE
2758                                 + HEADER_802_2_SIZE +
2759                                 HEADER_SNAP_SIZE,
2760                                 PCI_DMA_FROMDEVICE);
2761                         memset(rxdp, 0, sizeof(struct RxD1));
2762                 } else if(sp->rxd_mode == RXD_MODE_3B) {
2763                         rxdp3 = (struct RxD3*)rxdp;
2764                         ba = &mac_control->rings[ring_no].
2765                                 ba[blk][j];
2766                         pci_unmap_single(sp->pdev, (dma_addr_t)
2767                                 rxdp3->Buffer0_ptr,
2768                                 BUF0_LEN,
2769                                 PCI_DMA_FROMDEVICE);
2770                         pci_unmap_single(sp->pdev, (dma_addr_t)
2771                                 rxdp3->Buffer1_ptr,
2772                                 BUF1_LEN,
2773                                 PCI_DMA_FROMDEVICE);
2774                         pci_unmap_single(sp->pdev, (dma_addr_t)
2775                                 rxdp3->Buffer2_ptr,
2776                                 dev->mtu + 4,
2777                                 PCI_DMA_FROMDEVICE);
2778                         memset(rxdp, 0, sizeof(struct RxD3));
2779                 }
2780                 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
2781                 dev_kfree_skb(skb);
2782                 mac_control->rings[ring_no].rx_bufs_left -= 1;
2783         }
2784 }
2785
2786 /**
2787  *  free_rx_buffers - Frees all Rx buffers
2788  *  @sp: device private variable.
2789  *  Description:
2790  *  This function will free all Rx buffers allocated by host.
2791  *  Return Value:
2792  *  NONE.
2793  */
2794
2795 static void free_rx_buffers(struct s2io_nic *sp)
2796 {
2797         struct net_device *dev = sp->dev;
2798         int i, blk = 0, buf_cnt = 0;
2799         struct mac_info *mac_control;
2800         struct config_param *config;
2801
2802         mac_control = &sp->mac_control;
2803         config = &sp->config;
2804
2805         for (i = 0; i < config->rx_ring_num; i++) {
2806                 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2807                         free_rxd_blk(sp,i,blk);
2808
2809                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2810                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2811                 mac_control->rings[i].rx_curr_put_info.offset = 0;
2812                 mac_control->rings[i].rx_curr_get_info.offset = 0;
2813                 mac_control->rings[i].rx_bufs_left = 0;
2814                 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2815                           dev->name, buf_cnt, i);
2816         }
2817 }
2818
2819 static int s2io_chk_rx_buffers(struct ring_info *ring)
2820 {
2821         if (fill_rx_buffers(ring, 0) == -ENOMEM) {
2822                 DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name);
2823                 DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
2824         }
2825         return 0;
2826 }
2827
2828 /**
2829  * s2io_poll - Rx interrupt handler for NAPI support
2830  * @napi : pointer to the napi structure.
2831  * @budget : The number of packets that were budgeted to be processed
 * during one pass through the 'Poll' function.
2833  * Description:
2834  * Comes into picture only if NAPI support has been incorporated. It does
2835  * the same thing that rx_intr_handler does, but not in a interrupt context
2836  * also It will process only a given number of packets.
2837  * Return value:
2838  * 0 on success and 1 if there are No Rx packets to be processed.
2839  */
2840
static int s2io_poll_msix(struct napi_struct *napi, int budget)
{
	/* Each MSI-X Rx vector has its own per-ring napi context. */
	struct ring_info *ring = container_of(napi, struct ring_info, napi);
	struct net_device *dev = ring->dev;
	struct config_param *config;
	struct mac_info *mac_control;
	int pkts_processed = 0;
	u8 __iomem *addr = NULL;
	u8 val8 = 0;
	struct s2io_nic *nic = dev->priv;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	int budget_org = budget;

	config = &nic->config;
	mac_control = &nic->mac_control;

	/* Bail out early if the card has gone down under us. */
	if (unlikely(!is_s2io_card_up(nic)))
		return 0;

	pkts_processed = rx_intr_handler(ring, budget);
	s2io_chk_rx_buffers(ring);

	/* Budget not exhausted: all pending work done, leave polling mode. */
	if (pkts_processed < budget_org) {
		netif_rx_complete(dev, napi);
		/*Re Enable MSI-Rx Vector*/
		/*
		 * NOTE(review): the byte offset (7 - ring_no) into
		 * xmsi_mask_reg and the 0x3f/0xbf values presumably unmask
		 * this ring's MSI-X vector -- confirm against the Xframe
		 * register specification. The readb flushes the write.
		 */
		addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
		addr += 7 - ring->ring_no;
		val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
		writeb(val8, addr);
		val8 = readb(addr);
	}
	return pkts_processed;
}
2874 static int s2io_poll_inta(struct napi_struct *napi, int budget)
2875 {
2876         struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2877         struct ring_info *ring;
2878         struct net_device *dev = nic->dev;
2879         struct config_param *config;
2880         struct mac_info *mac_control;
2881         int pkts_processed = 0;
2882         int ring_pkts_processed, i;
2883         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2884         int budget_org = budget;
2885
2886         config = &nic->config;
2887         mac_control = &nic->mac_control;
2888
2889         if (unlikely(!is_s2io_card_up(nic)))
2890                 return 0;
2891
2892         for (i = 0; i < config->rx_ring_num; i++) {
2893                 ring = &mac_control->rings[i];
2894                 ring_pkts_processed = rx_intr_handler(ring, budget);
2895                 s2io_chk_rx_buffers(ring);
2896                 pkts_processed += ring_pkts_processed;
2897                 budget -= ring_pkts_processed;
2898                 if (budget <= 0)
2899                         break;
2900         }
2901         if (pkts_processed < budget_org) {
2902                 netif_rx_complete(dev, napi);
2903                 /* Re enable the Rx interrupts for the ring */
2904                 writeq(0, &bar0->rx_traffic_mask);
2905                 readl(&bar0->rx_traffic_mask);
2906         }
2907         return pkts_processed;
2908 }
2909
2910 #ifdef CONFIG_NET_POLL_CONTROLLER
2911 /**
2912  * s2io_netpoll - netpoll event handler entry point
2913  * @dev : pointer to the device structure.
2914  * Description:
2915  *      This function will be called by upper layer to check for events on the
2916  * interface in situations where interrupts are disabled. It is used for
2917  * specific in-kernel networking tasks, such as remote consoles and kernel
2918  * debugging over the network (example netdump in RedHat).
2919  */
2920 static void s2io_netpoll(struct net_device *dev)
2921 {
2922         struct s2io_nic *nic = dev->priv;
2923         struct mac_info *mac_control;
2924         struct config_param *config;
2925         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2926         u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2927         int i;
2928
2929         if (pci_channel_offline(nic->pdev))
2930                 return;
2931
2932         disable_irq(dev->irq);
2933
2934         mac_control = &nic->mac_control;
2935         config = &nic->config;
2936
2937         writeq(val64, &bar0->rx_traffic_int);
2938         writeq(val64, &bar0->tx_traffic_int);
2939
2940         /* we need to free up the transmitted skbufs or else netpoll will
2941          * run out of skbs and will fail and eventually netpoll application such
2942          * as netdump will fail.
2943          */
2944         for (i = 0; i < config->tx_fifo_num; i++)
2945                 tx_intr_handler(&mac_control->fifos[i]);
2946
2947         /* check for received packet and indicate up to network */
2948         for (i = 0; i < config->rx_ring_num; i++)
2949                 rx_intr_handler(&mac_control->rings[i], 0);
2950
2951         for (i = 0; i < config->rx_ring_num; i++) {
2952                 if (fill_rx_buffers(&mac_control->rings[i], 0) == -ENOMEM) {
2953                         DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2954                         DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
2955                         break;
2956                 }
2957         }
2958         enable_irq(dev->irq);
2959         return;
2960 }
2961 #endif
2962
/**
 *  rx_intr_handler - Rx interrupt handler
 *  @ring_data: per ring structure.
 *  @budget: budget for napi processing.
 *  Description:
 *  If the interrupt is because of a received frame or if the
 *  receive ring contains fresh as yet un-processed frames,this function is
 *  called. It picks out the RxD at which place the last Rx processing had
 *  stopped and sends the skb to the OSM's Rx handler and then increments
 *  the offset.
 *  Return Value:
 *  No. of napi packets processed.
 */
static int rx_intr_handler(struct ring_info *ring_data, int budget)
{
	int get_block, put_block;
	struct rx_curr_get_info get_info, put_info;
	struct RxD_t *rxdp;
	struct sk_buff *skb;
	int pkt_cnt = 0, napi_pkts = 0;
	int i;
	struct RxD1* rxdp1;
	struct RxD3* rxdp3;

	/* Snapshot the consumer (get) and producer (put) positions */
	get_info = ring_data->rx_curr_get_info;
	get_block = get_info.block_index;
	memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
	put_block = put_info.block_index;
	rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;

	/* Walk descriptors the NIC has marked as complete */
	while (RXD_IS_UP2DT(rxdp)) {
		/*
		 * If your are next to put index then it's
		 * FIFO full condition
		 */
		if ((get_block == put_block) &&
		    (get_info.offset + 1) == put_info.offset) {
			DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
				ring_data->dev->name);
			break;
		}
		skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
		if (skb == NULL) {
			DBG_PRINT(ERR_DBG, "%s: The skb is ",
				  ring_data->dev->name);
			DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
			return 0;
		}
		/* Unmap the descriptor's DMA buffer(s) per the RxD mode */
		if (ring_data->rxd_mode == RXD_MODE_1) {
			rxdp1 = (struct RxD1*)rxdp;
			pci_unmap_single(ring_data->pdev, (dma_addr_t)
				rxdp1->Buffer0_ptr,
				ring_data->mtu +
				HEADER_ETHERNET_II_802_3_SIZE +
				HEADER_802_2_SIZE +
				HEADER_SNAP_SIZE,
				PCI_DMA_FROMDEVICE);
		} else if (ring_data->rxd_mode == RXD_MODE_3B) {
			rxdp3 = (struct RxD3*)rxdp;
			pci_dma_sync_single_for_cpu(ring_data->pdev, (dma_addr_t)
				rxdp3->Buffer0_ptr,
				BUF0_LEN, PCI_DMA_FROMDEVICE);
			pci_unmap_single(ring_data->pdev, (dma_addr_t)
				rxdp3->Buffer2_ptr,
				ring_data->mtu + 4,
				PCI_DMA_FROMDEVICE);
		}
		prefetch(skb->data);
		rx_osm_handler(ring_data, rxdp);
		/* Advance the consumer offset, wrapping to the next block
		 * when the end of the current block is reached.
		 */
		get_info.offset++;
		ring_data->rx_curr_get_info.offset = get_info.offset;
		rxdp = ring_data->rx_blocks[get_block].
				rxds[get_info.offset].virt_addr;
		if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
			get_info.offset = 0;
			ring_data->rx_curr_get_info.offset = get_info.offset;
			get_block++;
			if (get_block == ring_data->block_count)
				get_block = 0;
			ring_data->rx_curr_get_info.block_index = get_block;
			rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
		}

		/* In NAPI mode, stop once the budget is exhausted */
		if (ring_data->nic->config.napi) {
			budget--;
			napi_pkts++;
			if (!budget)
				break;
		}
		pkt_cnt++;
		/* module parameter cap on packets handled per invocation */
		if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
			break;
	}
	if (ring_data->lro) {
		/* Clear all LRO sessions before exiting */
		for (i=0; i<MAX_LRO_SESSIONS; i++) {
			struct lro *lro = &ring_data->lro0_n[i];
			if (lro->in_use) {
				update_L3L4_header(ring_data->nic, lro);
				queue_rx_frame(lro->parent, lro->vlan_tag);
				clear_lro_session(lro);
			}
		}
	}
	return(napi_pkts);
}
3069
/**
 *  tx_intr_handler - Transmit interrupt handler
 *  @fifo_data : fifo whose completed Tx descriptors are to be reaped.
 *  Description:
 *  If an interrupt was raised to indicate DMA complete of the
 *  Tx packet, this function is called. It identifies the last TxD
 *  whose buffer was freed and frees all skbs whose data have already
 *  DMA'ed into the NICs internal memory.
 *  Return Value:
 *  NONE
 */

static void tx_intr_handler(struct fifo_info *fifo_data)
{
	struct s2io_nic *nic = fifo_data->nic;
	struct tx_curr_get_info get_info, put_info;
	struct sk_buff *skb = NULL;
	struct TxD *txdlp;
	int pkt_cnt = 0;
	unsigned long flags = 0;
	u8 err_mask;

	/* Lock contended (e.g. by the xmit path): back off and let a
	 * later invocation reap the completed descriptors.
	 */
	if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
			return;

	get_info = fifo_data->tx_curr_get_info;
	memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
	txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
	    list_virt_addr;
	/* Walk descriptors the NIC has handed back (ownership bit clear)
	 * until catching up with the producer (put) index.
	 */
	while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
	       (get_info.offset != put_info.offset) &&
	       (txdlp->Host_Control)) {
		/* Check for TxD errors */
		if (txdlp->Control_1 & TXD_T_CODE) {
			unsigned long long err;
			err = txdlp->Control_1 & TXD_T_CODE;
			if (err & 0x1) {
				nic->mac_control.stats_info->sw_stat.
						parity_err_cnt++;
			}

			/* update t_code statistics */
			err_mask = err >> 48;
			switch(err_mask) {
				case 2:
					nic->mac_control.stats_info->sw_stat.
							tx_buf_abort_cnt++;
				break;

				case 3:
					nic->mac_control.stats_info->sw_stat.
							tx_desc_abort_cnt++;
				break;

				case 7:
					nic->mac_control.stats_info->sw_stat.
							tx_parity_err_cnt++;
				break;

				case 10:
					nic->mac_control.stats_info->sw_stat.
							tx_link_loss_cnt++;
				break;

				case 15:
					nic->mac_control.stats_info->sw_stat.
							tx_list_proc_err_cnt++;
				break;
			}
		}

		skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
		if (skb == NULL) {
			spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
			DBG_PRINT(ERR_DBG, "%s: Null skb ",
			__FUNCTION__);
			DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
			return;
		}
		pkt_cnt++;

		/* Updating the statistics block */
		nic->stats.tx_bytes += skb->len;
		nic->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
		dev_kfree_skb_irq(skb);

		/* Advance the get offset, wrapping at fifo_len + 1 */
		get_info.offset++;
		if (get_info.offset == get_info.fifo_len + 1)
			get_info.offset = 0;
		txdlp = (struct TxD *) fifo_data->list_info
		    [get_info.offset].list_virt_addr;
		fifo_data->tx_curr_get_info.offset =
		    get_info.offset;
	}

	s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);

	spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
}
3169
3170 /**
3171  *  s2io_mdio_write - Function to write in to MDIO registers
3172  *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3173  *  @addr     : address value
3174  *  @value    : data value
3175  *  @dev      : pointer to net_device structure
3176  *  Description:
3177  *  This function is used to write values to the MDIO registers
3178  *  NONE
3179  */
3180 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
3181 {
3182         u64 val64 = 0x0;
3183         struct s2io_nic *sp = dev->priv;
3184         struct XENA_dev_config __iomem *bar0 = sp->bar0;
3185
3186         //address transaction
3187         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3188                         | MDIO_MMD_DEV_ADDR(mmd_type)
3189                         | MDIO_MMS_PRT_ADDR(0x0);
3190         writeq(val64, &bar0->mdio_control);
3191         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3192         writeq(val64, &bar0->mdio_control);
3193         udelay(100);
3194
3195         //Data transaction
3196         val64 = 0x0;
3197         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3198                         | MDIO_MMD_DEV_ADDR(mmd_type)
3199                         | MDIO_MMS_PRT_ADDR(0x0)
3200                         | MDIO_MDIO_DATA(value)
3201                         | MDIO_OP(MDIO_OP_WRITE_TRANS);
3202         writeq(val64, &bar0->mdio_control);
3203         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3204         writeq(val64, &bar0->mdio_control);
3205         udelay(100);
3206
3207         val64 = 0x0;
3208         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3209         | MDIO_MMD_DEV_ADDR(mmd_type)
3210         | MDIO_MMS_PRT_ADDR(0x0)
3211         | MDIO_OP(MDIO_OP_READ_TRANS);
3212         writeq(val64, &bar0->mdio_control);
3213         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3214         writeq(val64, &bar0->mdio_control);
3215         udelay(100);
3216
3217 }
3218
3219 /**
3220  *  s2io_mdio_read - Function to write in to MDIO registers
3221  *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3222  *  @addr     : address value
3223  *  @dev      : pointer to net_device structure
3224  *  Description:
3225  *  This function is used to read values to the MDIO registers
3226  *  NONE
3227  */
3228 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3229 {
3230         u64 val64 = 0x0;
3231         u64 rval64 = 0x0;
3232         struct s2io_nic *sp = dev->priv;
3233         struct XENA_dev_config __iomem *bar0 = sp->bar0;
3234
3235         /* address transaction */
3236         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3237                         | MDIO_MMD_DEV_ADDR(mmd_type)
3238                         | MDIO_MMS_PRT_ADDR(0x0);
3239         writeq(val64, &bar0->mdio_control);
3240         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3241         writeq(val64, &bar0->mdio_control);
3242         udelay(100);
3243
3244         /* Data transaction */
3245         val64 = 0x0;
3246         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3247                         | MDIO_MMD_DEV_ADDR(mmd_type)
3248                         | MDIO_MMS_PRT_ADDR(0x0)
3249                         | MDIO_OP(MDIO_OP_READ_TRANS);
3250         writeq(val64, &bar0->mdio_control);
3251         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3252         writeq(val64, &bar0->mdio_control);
3253         udelay(100);
3254
3255         /* Read the value from regs */
3256         rval64 = readq(&bar0->mdio_control);
3257         rval64 = rval64 & 0xFFFF0000;
3258         rval64 = rval64 >> 16;
3259         return rval64;
3260 }
3261 /**
3262  *  s2io_chk_xpak_counter - Function to check the status of the xpak counters
3263  *  @counter      : couter value to be updated
3264  *  @flag         : flag to indicate the status
3265  *  @type         : counter type
3266  *  Description:
3267  *  This function is to check the status of the xpak counters value
3268  *  NONE
3269  */
3270
3271 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
3272 {
3273         u64 mask = 0x3;
3274         u64 val64;
3275         int i;
3276         for(i = 0; i <index; i++)
3277                 mask = mask << 0x2;
3278
3279         if(flag > 0)
3280         {
3281                 *counter = *counter + 1;
3282                 val64 = *regs_stat & mask;
3283                 val64 = val64 >> (index * 0x2);
3284                 val64 = val64 + 1;
3285                 if(val64 == 3)
3286                 {
3287                         switch(type)
3288                         {
3289                         case 1:
3290                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3291                                           "service. Excessive temperatures may "
3292                                           "result in premature transceiver "
3293                                           "failure \n");
3294                         break;
3295                         case 2:
3296                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3297                                           "service Excessive bias currents may "
3298                                           "indicate imminent laser diode "
3299                                           "failure \n");
3300                         break;
3301                         case 3:
3302                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3303                                           "service Excessive laser output "
3304                                           "power may saturate far-end "
3305                                           "receiver\n");
3306                         break;
3307                         default:
3308                                 DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
3309                                           "type \n");
3310                         }
3311                         val64 = 0x0;
3312                 }
3313                 val64 = val64 << (index * 0x2);
3314                 *regs_stat = (*regs_stat & (~mask)) | (val64);
3315
3316         } else {
3317                 *regs_stat = *regs_stat & (~mask);
3318         }
3319 }
3320
3321 /**
3322  *  s2io_updt_xpak_counter - Function to update the xpak counters
3323  *  @dev         : pointer to net_device struct
3324  *  Description:
3325  *  This function is to upate the status of the xpak counters value
3326  *  NONE
3327  */
3328 static void s2io_updt_xpak_counter(struct net_device *dev)
3329 {
3330         u16 flag  = 0x0;
3331         u16 type  = 0x0;
3332         u16 val16 = 0x0;
3333         u64 val64 = 0x0;
3334         u64 addr  = 0x0;
3335
3336         struct s2io_nic *sp = dev->priv;
3337         struct stat_block *stat_info = sp->mac_control.stats_info;
3338
3339         /* Check the communication with the MDIO slave */
3340         addr = 0x0000;
3341         val64 = 0x0;
3342         val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3343         if((val64 == 0xFFFF) || (val64 == 0x0000))
3344         {
3345                 DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
3346                           "Returned %llx\n", (unsigned long long)val64);
3347                 return;
3348         }
3349
3350         /* Check for the expecte value of 2040 at PMA address 0x0000 */
3351         if(val64 != 0x2040)
3352         {
3353                 DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
3354                 DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
3355                           (unsigned long long)val64);
3356                 return;
3357         }
3358
3359         /* Loading the DOM register to MDIO register */
3360         addr = 0xA100;
3361         s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
3362         val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3363
3364         /* Reading the Alarm flags */
3365         addr = 0xA070;
3366         val64 = 0x0;
3367         val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3368
3369         flag = CHECKBIT(val64, 0x7);
3370         type = 1;
3371         s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
3372                                 &stat_info->xpak_stat.xpak_regs_stat,
3373                                 0x0, flag, type);
3374
3375         if(CHECKBIT(val64, 0x6))
3376                 stat_info->xpak_stat.alarm_transceiver_temp_low++;
3377
3378         flag = CHECKBIT(val64, 0x3);
3379         type = 2;
3380         s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
3381                                 &stat_info->xpak_stat.xpak_regs_stat,
3382                                 0x2, flag, type);
3383
3384         if(CHECKBIT(val64, 0x2))
3385                 stat_info->xpak_stat.alarm_laser_bias_current_low++;
3386
3387         flag = CHECKBIT(val64, 0x1);
3388         type = 3;
3389         s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
3390                                 &stat_info->xpak_stat.xpak_regs_stat,
3391                                 0x4, flag, type);
3392
3393         if(CHECKBIT(val64, 0x0))
3394                 stat_info->xpak_stat.alarm_laser_output_power_low++;
3395
3396         /* Reading the Warning flags */
3397         addr = 0xA074;
3398         val64 = 0x0;
3399         val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3400
3401         if(CHECKBIT(val64, 0x7))
3402                 stat_info->xpak_stat.warn_transceiver_temp_high++;
3403
3404         if(CHECKBIT(val64, 0x6))
3405                 stat_info->xpak_stat.warn_transceiver_temp_low++;
3406
3407         if(CHECKBIT(val64, 0x3))
3408                 stat_info->xpak_stat.warn_laser_bias_current_high++;
3409
3410         if(CHECKBIT(val64, 0x2))
3411                 stat_info->xpak_stat.warn_laser_bias_current_low++;
3412
3413         if(CHECKBIT(val64, 0x1))
3414                 stat_info->xpak_stat.warn_laser_output_power_high++;
3415
3416         if(CHECKBIT(val64, 0x0))
3417                 stat_info->xpak_stat.warn_laser_output_power_low++;
3418 }
3419
3420 /**
3421  *  wait_for_cmd_complete - waits for a command to complete.
3422  *  @sp : private member of the device structure, which is a pointer to the
3423  *  s2io_nic structure.
3424  *  Description: Function that waits for a command to Write into RMAC
3425  *  ADDR DATA registers to be completed and returns either success or
3426  *  error depending on whether the command was complete or not.
3427  *  Return value:
3428  *   SUCCESS on success and FAILURE on failure.
3429  */
3430
3431 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3432                                 int bit_state)
3433 {
3434         int ret = FAILURE, cnt = 0, delay = 1;
3435         u64 val64;
3436
3437         if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3438                 return FAILURE;
3439
3440         do {
3441                 val64 = readq(addr);
3442                 if (bit_state == S2IO_BIT_RESET) {
3443                         if (!(val64 & busy_bit)) {
3444                                 ret = SUCCESS;
3445                                 break;
3446                         }
3447                 } else {
3448                         if (!(val64 & busy_bit)) {
3449                                 ret = SUCCESS;
3450                                 break;
3451                         }
3452                 }
3453
3454                 if(in_interrupt())
3455                         mdelay(delay);
3456                 else
3457                         msleep(delay);
3458
3459                 if (++cnt >= 10)
3460                         delay = 50;
3461         } while (cnt < 20);
3462         return ret;
3463 }
3464 /*
3465  * check_pci_device_id - Checks if the device id is supported
3466  * @id : device id
3467  * Description: Function to check if the pci device id is supported by driver.
3468  * Return value: Actual device id if supported else PCI_ANY_ID
3469  */
3470 static u16 check_pci_device_id(u16 id)
3471 {
3472         switch (id) {
3473         case PCI_DEVICE_ID_HERC_WIN:
3474         case PCI_DEVICE_ID_HERC_UNI:
3475                 return XFRAME_II_DEVICE;
3476         case PCI_DEVICE_ID_S2IO_UNI:
3477         case PCI_DEVICE_ID_S2IO_WIN:
3478                 return XFRAME_I_DEVICE;
3479         default:
3480                 return PCI_ANY_ID;
3481         }
3482 }
3483
/**
 *  s2io_reset - Resets the card.
 *  @sp : private member of the device structure.
 *  Description: Function to Reset the card. This function then also
 *  restores the previously saved PCI configuration space registers as
 *  the card reset also resets the configuration space.
 *  Return value:
 *  void.
 */

static void s2io_reset(struct s2io_nic * sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	u16 subid, pci_cmd;
	int i;
	u16 val16;
	unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
	unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;

	DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
			__FUNCTION__, sp->dev->name);

	/* Back up  the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));

	val64 = SW_RESET_ALL;
	writeq(val64, &bar0->sw_reset);
	/* CX4 products get extra settle time after the soft reset */
	if (strstr(sp->product_name, "CX4")) {
		msleep(750);
	}
	msleep(250);
	/* Poll until config space is readable again (device id valid) */
	for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {

		/* Restore the PCI state saved during initialization. */
		pci_restore_state(sp->pdev);
		pci_read_config_word(sp->pdev, 0x2, &val16);
		if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
			break;
		msleep(200);
	}

	if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
		DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__);
	}

	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);

	s2io_init_pci(sp);

	/* Set swapper to enable I/O register access */
	s2io_set_swapper(sp);

	/* restore mac_addr entries */
	do_s2io_restore_unicast_mc(sp);

	/* Restore the MSIX table entries from local variables */
	restore_xmsi_data(sp);

	/* Clear certain PCI/PCI-X fields after reset */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Clear "detected parity error" bit */
		pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);

		/* Clearing PCIX Ecc status register */
		pci_write_config_dword(sp->pdev, 0x68, 0x7C);

		/* Clearing PCI_STATUS error reflected here */
		writeq(s2BIT(62), &bar0->txpic_int_reg);
	}

	/* Reset device statistics maintained by OS */
	memset(&sp->stats, 0, sizeof (struct net_device_stats));

	up_cnt = sp->mac_control.stats_info->sw_stat.link_up_cnt;
	down_cnt = sp->mac_control.stats_info->sw_stat.link_down_cnt;
	up_time = sp->mac_control.stats_info->sw_stat.link_up_time;
	down_time = sp->mac_control.stats_info->sw_stat.link_down_time;
	reset_cnt = sp->mac_control.stats_info->sw_stat.soft_reset_cnt;
	mem_alloc_cnt = sp->mac_control.stats_info->sw_stat.mem_allocated;
	mem_free_cnt = sp->mac_control.stats_info->sw_stat.mem_freed;
	watchdog_cnt = sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt;
	/* save link up/down time/cnt, reset/memory/watchdog cnt */
	memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
	/* restore link up/down time/cnt, reset/memory/watchdog cnt */
	sp->mac_control.stats_info->sw_stat.link_up_cnt = up_cnt;
	sp->mac_control.stats_info->sw_stat.link_down_cnt = down_cnt;
	sp->mac_control.stats_info->sw_stat.link_up_time = up_time;
	sp->mac_control.stats_info->sw_stat.link_down_time = down_time;
	sp->mac_control.stats_info->sw_stat.soft_reset_cnt = reset_cnt;
	sp->mac_control.stats_info->sw_stat.mem_allocated = mem_alloc_cnt;
	sp->mac_control.stats_info->sw_stat.mem_freed = mem_free_cnt;
	sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt = watchdog_cnt;

	/* SXE-002: Configure link and activity LED to turn it off */
	subid = sp->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (sp->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	/*
	 * Clear spurious ECC interrupts that would have occured on
	 * XFRAME II cards after reset.
	 */
	if (sp->device_type == XFRAME_II_DEVICE) {
		val64 = readq(&bar0->pcc_err_reg);
		writeq(val64, &bar0->pcc_err_reg);
	}

	sp->device_enabled_once = FALSE;
}
3600
3601 /**
3602  *  s2io_set_swapper - to set the swapper controle on the card
3603  *  @sp : private member of the device structure,
3604  *  pointer to the s2io_nic structure.
3605  *  Description: Function to set the swapper control on the card
3606  *  correctly depending on the 'endianness' of the system.
3607  *  Return value:
3608  *  SUCCESS on success and FAILURE on failure.
3609  */
3610
static int s2io_set_swapper(struct s2io_nic * sp)
{
	struct net_device *dev = sp->dev;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64, valt, valr;

	/*
	 * Set proper endian settings and verify the same by reading
	 * the PIF Feed-back register.
	 */

	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		/* PIO read path is mis-swapped: probe the four possible
		 * flip-enable/swap-enable (FE/SE) combinations until the
		 * feed-back register reads back the expected pattern. */
		int i = 0;
		u64 value[] = { 0xC30000C3C30000C3ULL,   /* FE=1, SE=1 */
				0x8100008181000081ULL,  /* FE=1, SE=0 */
				0x4200004242000042ULL,  /* FE=0, SE=1 */
				0};                     /* FE=0, SE=0 */

		while(i<4) {
			writeq(value[i], &bar0->swapper_ctrl);
			val64 = readq(&bar0->pif_rd_swapper_fb);
			if (val64 == 0x0123456789ABCDEFULL)
				break;
			i++;
		}
		if (i == 4) {
			/* No combination produced a correct read - give up. */
			DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
				dev->name);
			DBG_PRINT(ERR_DBG, "feedback read %llx\n",
				(unsigned long long) val64);
			return FAILURE;
		}
		/* Remember the working read-path setting for the write probe. */
		valr = value[i];
	} else {
		/* Read path already correct; keep the current setting. */
		valr = readq(&bar0->swapper_ctrl);
	}

	/* Verify the PIO write path by writing a known pattern into the
	 * xmsi_address register and reading it back. */
	valt = 0x0123456789ABCDEFULL;
	writeq(valt, &bar0->xmsi_address);
	val64 = readq(&bar0->xmsi_address);

	if(val64 != valt) {
		/* Write path mis-swapped: probe the four write-path FE/SE
		 * combinations, OR-ed with the read setting found above. */
		int i = 0;
		u64 value[] = { 0x00C3C30000C3C300ULL,  /* FE=1, SE=1 */
				0x0081810000818100ULL,  /* FE=1, SE=0 */
				0x0042420000424200ULL,  /* FE=0, SE=1 */
				0};                     /* FE=0, SE=0 */

		while(i<4) {
			writeq((value[i] | valr), &bar0->swapper_ctrl);
			writeq(valt, &bar0->xmsi_address);
			val64 = readq(&bar0->xmsi_address);
			if(val64 == valt)
				break;
			i++;
		}
		if(i == 4) {
			unsigned long long x = val64;
			DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
			DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
			return FAILURE;
		}
	}
	/* Keep only the top 16 bits (the probed PIO swapper bits) and add
	 * the DMA-path swapper bits required for this host's endianness. */
	val64 = readq(&bar0->swapper_ctrl);
	val64 &= 0xFFFF000000000000ULL;

#ifdef  __BIG_ENDIAN
	/*
	 * The device by default set to a big endian format, so a
	 * big endian driver need not set anything.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		 SWAPPER_CTRL_TXP_SE |
		 SWAPPER_CTRL_TXD_R_FE |
		 SWAPPER_CTRL_TXD_W_FE |
		 SWAPPER_CTRL_TXF_R_FE |
		 SWAPPER_CTRL_RXD_R_FE |
		 SWAPPER_CTRL_RXD_W_FE |
		 SWAPPER_CTRL_RXF_W_FE |
		 SWAPPER_CTRL_XMSI_FE |
		 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
	if (sp->config.intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#else
	/*
	 * Initially we enable all bits to make it accessible by the
	 * driver, then we selectively enable only those bits that
	 * we want to set.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		 SWAPPER_CTRL_TXP_SE |
		 SWAPPER_CTRL_TXD_R_FE |
		 SWAPPER_CTRL_TXD_R_SE |
		 SWAPPER_CTRL_TXD_W_FE |
		 SWAPPER_CTRL_TXD_W_SE |
		 SWAPPER_CTRL_TXF_R_FE |
		 SWAPPER_CTRL_RXD_R_FE |
		 SWAPPER_CTRL_RXD_R_SE |
		 SWAPPER_CTRL_RXD_W_FE |
		 SWAPPER_CTRL_RXD_W_SE |
		 SWAPPER_CTRL_RXF_W_FE |
		 SWAPPER_CTRL_XMSI_FE |
		 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
	if (sp->config.intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#endif
	val64 = readq(&bar0->swapper_ctrl);

	/*
	 * Verifying if endian settings are accurate by reading a
	 * feedback register.
	 */
	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		/* Endian settings are incorrect, calls for another dekko. */
		DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
			  dev->name);
		DBG_PRINT(ERR_DBG, "feedback read %llx\n",
			  (unsigned long long) val64);
		return FAILURE;
	}

	return SUCCESS;
}
3738
3739 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3740 {
3741         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3742         u64 val64;
3743         int ret = 0, cnt = 0;
3744
3745         do {
3746                 val64 = readq(&bar0->xmsi_access);
3747                 if (!(val64 & s2BIT(15)))
3748                         break;
3749                 mdelay(1);
3750                 cnt++;
3751         } while(cnt < 5);
3752         if (cnt == 5) {
3753                 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3754                 ret = 1;
3755         }
3756
3757         return ret;
3758 }
3759
3760 static void restore_xmsi_data(struct s2io_nic *nic)
3761 {
3762         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3763         u64 val64;
3764         int i, msix_index;
3765
3766
3767         if (nic->device_type == XFRAME_I_DEVICE)
3768                 return;
3769
3770         for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3771                 msix_index = (i) ? ((i-1) * 8 + 1): 0;
3772                 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3773                 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3774                 val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
3775                 writeq(val64, &bar0->xmsi_access);
3776                 if (wait_for_msix_trans(nic, msix_index)) {
3777                         DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3778                         continue;
3779                 }
3780         }
3781 }
3782
3783 static void store_xmsi_data(struct s2io_nic *nic)
3784 {
3785         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3786         u64 val64, addr, data;
3787         int i, msix_index;
3788
3789         if (nic->device_type == XFRAME_I_DEVICE)
3790                 return;
3791
3792         /* Store and display */
3793         for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3794                 msix_index = (i) ? ((i-1) * 8 + 1): 0;
3795                 val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
3796                 writeq(val64, &bar0->xmsi_access);
3797                 if (wait_for_msix_trans(nic, msix_index)) {
3798                         DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3799                         continue;
3800                 }
3801                 addr = readq(&bar0->xmsi_address);
3802                 data = readq(&bar0->xmsi_data);
3803                 if (addr && data) {
3804                         nic->msix_info[i].addr = addr;
3805                         nic->msix_info[i].data = data;
3806                 }
3807         }
3808 }
3809
3810 static int s2io_enable_msi_x(struct s2io_nic *nic)
3811 {
3812         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3813         u64 rx_mat;
3814         u16 msi_control; /* Temp variable */
3815         int ret, i, j, msix_indx = 1;
3816
3817         nic->entries = kmalloc(nic->num_entries * sizeof(struct msix_entry),
3818                                GFP_KERNEL);
3819         if (!nic->entries) {
3820                 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \
3821                         __FUNCTION__);
3822                 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3823                 return -ENOMEM;
3824         }
3825         nic->mac_control.stats_info->sw_stat.mem_allocated
3826                 += (nic->num_entries * sizeof(struct msix_entry));
3827
3828         memset(nic->entries, 0, nic->num_entries * sizeof(struct msix_entry));
3829
3830         nic->s2io_entries =
3831                 kmalloc(nic->num_entries * sizeof(struct s2io_msix_entry),
3832                                    GFP_KERNEL);
3833         if (!nic->s2io_entries) {
3834                 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3835                         __FUNCTION__);
3836                 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3837                 kfree(nic->entries);
3838                 nic->mac_control.stats_info->sw_stat.mem_freed
3839                         += (nic->num_entries * sizeof(struct msix_entry));
3840                 return -ENOMEM;
3841         }
3842          nic->mac_control.stats_info->sw_stat.mem_allocated
3843                 += (nic->num_entries * sizeof(struct s2io_msix_entry));
3844         memset(nic->s2io_entries, 0,
3845                 nic->num_entries * sizeof(struct s2io_msix_entry));
3846
3847         nic->entries[0].entry = 0;
3848         nic->s2io_entries[0].entry = 0;
3849         nic->s2io_entries[0].in_use = MSIX_FLG;
3850         nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
3851         nic->s2io_entries[0].arg = &nic->mac_control.fifos;
3852
3853         for (i = 1; i < nic->num_entries; i++) {
3854                 nic->entries[i].entry = ((i - 1) * 8) + 1;
3855                 nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
3856                 nic->s2io_entries[i].arg = NULL;
3857                 nic->s2io_entries[i].in_use = 0;
3858         }
3859
3860         rx_mat = readq(&bar0->rx_mat);
3861         for (j = 0; j < nic->config.rx_ring_num; j++) {
3862                 rx_mat |= RX_MAT_SET(j, msix_indx);
3863                 nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
3864                 nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
3865                 nic->s2io_entries[j+1].in_use = MSIX_FLG;
3866                 msix_indx += 8;
3867         }
3868         writeq(rx_mat, &bar0->rx_mat);
3869         readq(&bar0->rx_mat);
3870
3871         ret = pci_enable_msix(nic->pdev, nic->entries, nic->num_entries);
3872         /* We fail init if error or we get less vectors than min required */
3873         if (ret) {
3874                 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
3875                 kfree(nic->entries);
3876                 nic->mac_control.stats_info->sw_stat.mem_freed
3877                         += (nic->num_entries * sizeof(struct msix_entry));
3878                 kfree(nic->s2io_entries);
3879                 nic->mac_control.stats_info->sw_stat.mem_freed
3880                         += (nic->num_entries * sizeof(struct s2io_msix_entry));
3881                 nic->entries = NULL;
3882                 nic->s2io_entries = NULL;
3883                 return -ENOMEM;
3884         }
3885
3886         /*
3887          * To enable MSI-X, MSI also needs to be enabled, due to a bug
3888          * in the herc NIC. (Temp change, needs to be removed later)
3889          */
3890         pci_read_config_word(nic->pdev, 0x42, &msi_control);
3891         msi_control |= 0x1; /* Enable MSI */
3892         pci_write_config_word(nic->pdev, 0x42, msi_control);
3893
3894         return 0;
3895 }
3896
3897 /* Handle software interrupt used during MSI(X) test */
3898 static irqreturn_t s2io_test_intr(int irq, void *dev_id)
3899 {
3900         struct s2io_nic *sp = dev_id;
3901
3902         sp->msi_detected = 1;
3903         wake_up(&sp->msi_wait);
3904
3905         return IRQ_HANDLED;
3906 }
3907
/* Test interrupt path by forcing a software IRQ */
3909 static int s2io_test_msi(struct s2io_nic *sp)
3910 {
3911         struct pci_dev *pdev = sp->pdev;
3912         struct XENA_dev_config __iomem *bar0 = sp->bar0;
3913         int err;
3914         u64 val64, saved64;
3915
3916         err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
3917                         sp->name, sp);
3918         if (err) {
3919                 DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
3920                        sp->dev->name, pci_name(pdev), pdev->irq);
3921                 return err;
3922         }
3923
3924         init_waitqueue_head (&sp->msi_wait);
3925         sp->msi_detected = 0;
3926
3927         saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
3928         val64 |= SCHED_INT_CTRL_ONE_SHOT;
3929         val64 |= SCHED_INT_CTRL_TIMER_EN;
3930         val64 |= SCHED_INT_CTRL_INT2MSI(1);
3931         writeq(val64, &bar0->scheduled_int_ctrl);
3932
3933         wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);
3934
3935         if (!sp->msi_detected) {
3936                 /* MSI(X) test failed, go back to INTx mode */
3937                 DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
3938                         "using MSI(X) during test\n", sp->dev->name,
3939                         pci_name(pdev));
3940
3941                 err = -EOPNOTSUPP;
3942         }
3943
3944         free_irq(sp->entries[1].vector, sp);
3945
3946         writeq(saved64, &bar0->scheduled_int_ctrl);
3947
3948         return err;
3949 }
3950
3951 static void remove_msix_isr(struct s2io_nic *sp)
3952 {
3953         int i;
3954         u16 msi_control;
3955
3956         for (i = 0; i < sp->num_entries; i++) {
3957                 if (sp->s2io_entries[i].in_use ==
3958                         MSIX_REGISTERED_SUCCESS) {
3959                         int vector = sp->entries[i].vector;
3960                         void *arg = sp->s2io_entries[i].arg;
3961                         free_irq(vector, arg);
3962                 }
3963         }
3964
3965         kfree(sp->entries);
3966         kfree(sp->s2io_entries);
3967         sp->entries = NULL;
3968         sp->s2io_entries = NULL;
3969
3970         pci_read_config_word(sp->pdev, 0x42, &msi_control);
3971         msi_control &= 0xFFFE; /* Disable MSI */
3972         pci_write_config_word(sp->pdev, 0x42, msi_control);
3973
3974         pci_disable_msix(sp->pdev);
3975 }
3976
3977 static void remove_inta_isr(struct s2io_nic *sp)
3978 {
3979         struct net_device *dev = sp->dev;
3980
3981         free_irq(sp->pdev->irq, dev);
3982 }
3983
3984 /* ********************************************************* *
3985  * Functions defined below concern the OS part of the driver *
3986  * ********************************************************* */
3987
3988 /**
3989  *  s2io_open - open entry point of the driver
3990  *  @dev : pointer to the device structure.
3991  *  Description:
3992  *  This function is the open entry point of the driver. It mainly calls a
3993  *  function to allocate Rx buffers and inserts them into the buffer
3994  *  descriptors and then enables the Rx part of the NIC.
3995  *  Return value:
3996  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3997  *   file on failure.
3998  */
3999
4000 static int s2io_open(struct net_device *dev)
4001 {
4002         struct s2io_nic *sp = dev->priv;
4003         int err = 0;
4004
4005         /*
4006          * Make sure you have link off by default every time
4007          * Nic is initialized
4008          */
4009         netif_carrier_off(dev);
4010         sp->last_link_state = 0;
4011
4012         /* Initialize H/W and enable interrupts */
4013         err = s2io_card_up(sp);
4014         if (err) {
4015                 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
4016                           dev->name);
4017                 goto hw_init_failed;
4018         }
4019
4020         if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
4021                 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
4022                 s2io_card_down(sp);
4023                 err = -ENODEV;
4024                 goto hw_init_failed;
4025         }
4026         s2io_start_all_tx_queue(sp);
4027         return 0;
4028
4029 hw_init_failed:
4030         if (sp->config.intr_type == MSI_X) {
4031                 if (sp->entries) {
4032                         kfree(sp->entries);
4033                         sp->mac_control.stats_info->sw_stat.mem_freed
4034                         += (sp->num_entries * sizeof(struct msix_entry));
4035                 }
4036                 if (sp->s2io_entries) {
4037                         kfree(sp->s2io_entries);
4038                         sp->mac_control.stats_info->sw_stat.mem_freed
4039                         += (sp->num_entries * sizeof(struct s2io_msix_entry));
4040                 }
4041         }
4042         return err;
4043 }
4044
4045 /**
4046  *  s2io_close -close entry point of the driver
4047  *  @dev : device pointer.
4048  *  Description:
4049  *  This is the stop entry point of the driver. It needs to undo exactly
4050  *  whatever was done by the open entry point,thus it's usually referred to
4051  *  as the close function.Among other things this function mainly stops the
4052  *  Rx side of the NIC and frees all the Rx buffers in the Rx rings.
4053  *  Return value:
4054  *  0 on success and an appropriate (-)ve integer as defined in errno.h
4055  *  file on failure.
4056  */
4057
4058 static int s2io_close(struct net_device *dev)
4059 {
4060         struct s2io_nic *sp = dev->priv;
4061         struct config_param *config = &sp->config;
4062         u64 tmp64;
4063         int offset;
4064
4065         /* Return if the device is already closed               *
4066         *  Can happen when s2io_card_up failed in change_mtu    *
4067         */
4068         if (!is_s2io_card_up(sp))
4069                 return 0;
4070
4071         s2io_stop_all_tx_queue(sp);
4072         /* delete all populated mac entries */
4073         for (offset = 1; offset < config->max_mc_addr; offset++) {
4074                 tmp64 = do_s2io_read_unicast_mc(sp, offset);
4075                 if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
4076                         do_s2io_delete_unicast_mc(sp, tmp64);
4077         }
4078
4079         s2io_card_down(sp);
4080
4081         return 0;
4082 }
4083
4084 /**
 *  s2io_xmit - Tx entry point of the driver
 *  @skb : the socket buffer containing the Tx data.
 *  @dev : device pointer.
 *  Description :
 *  This function is the Tx entry point of the driver. S2IO NIC supports
 *  certain protocol assist features on Tx side, namely  CSO, S/G, LSO.
 *  NOTE: when device can't queue the pkt, just the trans_start variable will
 *  not be updated.
4093  *  Return value:
4094  *  0 on success & 1 on failure.
4095  */
4096
static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct s2io_nic *sp = dev->priv;
	u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
	register u64 val64;
	struct TxD *txdp;
	struct TxFIFO_element __iomem *tx_fifo;
	unsigned long flags = 0;
	u16 vlan_tag = 0;
	struct fifo_info *fifo = NULL;
	struct mac_info *mac_control;
	struct config_param *config;
	int do_spin_lock = 1;
	int offload_type;
	int enable_per_list_interrupt = 0;
	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;

	mac_control = &sp->mac_control;
	config = &sp->config;

	DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);

	/* Drop empty skbs outright */
	if (unlikely(skb->len <= 0)) {
		DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
		dev_kfree_skb_any(skb);
		return 0;
	}

	/* Card is being reset/brought down - silently drop the packet */
	if (!is_s2io_card_up(sp)) {
		DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
			  dev->name);
		dev_kfree_skb(skb);
		return 0;
	}

	queue = 0;
	if (sp->vlgrp && vlan_tx_tag_present(skb))
		vlan_tag = vlan_tx_tag_get(skb);
	/* Default steering: hash TCP/UDP port numbers into a fifo index.
	 * For UDP we additionally use trylock (do_spin_lock = 0) and may
	 * request a per-list interrupt for large packets. */
	if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *ip;
			struct tcphdr *th;
			ip = ip_hdr(skb);

			/* Only the first (unfragmented) segment carries
			 * a transport header we can hash on */
			if ((ip->frag_off & htons(IP_OFFSET|IP_MF)) == 0) {
				th = (struct tcphdr *)(((unsigned char *)ip) +
						ip->ihl*4);

				if (ip->protocol == IPPROTO_TCP) {
					queue_len = sp->total_tcp_fifos;
					queue = (ntohs(th->source) +
							ntohs(th->dest)) &
					    sp->fifo_selector[queue_len - 1];
					if (queue >= queue_len)
						queue = queue_len - 1;
				} else if (ip->protocol == IPPROTO_UDP) {
					queue_len = sp->total_udp_fifos;
					queue = (ntohs(th->source) +
							ntohs(th->dest)) &
					    sp->fifo_selector[queue_len - 1];
					if (queue >= queue_len)
						queue = queue_len - 1;
					queue += sp->udp_fifo_idx;
					if (skb->len > 1024)
						enable_per_list_interrupt = 1;
					do_spin_lock = 0;
				}
			}
		}
	} else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
		/* get fifo number based on skb->priority value */
		queue = config->fifo_mapping
					[skb->priority & (MAX_TX_FIFOS - 1)];
	fifo = &mac_control->fifos[queue];

	/* UDP path uses trylock so a busy fifo is retried by the stack
	 * (NETDEV_TX_LOCKED) instead of spinning */
	if (do_spin_lock)
		spin_lock_irqsave(&fifo->tx_lock, flags);
	else {
		if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
			return NETDEV_TX_LOCKED;
	}

	/* If this fifo's queue is stopped, tell the stack to retry later */
	if (sp->config.multiq) {
		if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
			spin_unlock_irqrestore(&fifo->tx_lock, flags);
			return NETDEV_TX_BUSY;
		}
	} else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
		if (netif_queue_stopped(dev)) {
			spin_unlock_irqrestore(&fifo->tx_lock, flags);
			return NETDEV_TX_BUSY;
		}
	}

	put_off = (u16) fifo->tx_curr_put_info.offset;
	get_off = (u16) fifo->tx_curr_get_info.offset;
	txdp = (struct TxD *) fifo->list_info[put_off].list_virt_addr;

	queue_len = fifo->tx_curr_put_info.fifo_len + 1;
	/* Avoid "put" pointer going beyond "get" pointer */
	if (txdp->Host_Control ||
		   ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
		s2io_stop_tx_queue(sp, fifo->fifo_no);
		dev_kfree_skb(skb);
		spin_unlock_irqrestore(&fifo->tx_lock, flags);
		return 0;
	}

	/* Fill in the first descriptor: offload flags, ownership, vlan */
	offload_type = s2io_offload_type(skb);
	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
		txdp->Control_1 |= TXD_TCP_LSO_EN;
		txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
	}
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txdp->Control_2 |=
		    (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
		     TXD_TX_CKO_UDP_EN);
	}
	txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
	txdp->Control_1 |= TXD_LIST_OWN_XENA;
	txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
	if (enable_per_list_interrupt)
		if (put_off & (queue_len >> 5))
			txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
	if (vlan_tag) {
		txdp->Control_2 |= TXD_VLAN_ENABLE;
		txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
	}

	frg_len = skb->len - skb->data_len;
	/* UFO: descriptor 0 carries an 8-byte in-band header holding the
	 * IPv6 fragment id; the payload starts at the next descriptor */
	if (offload_type == SKB_GSO_UDP) {
		int ufo_size;

		ufo_size = s2io_udp_mss(skb);
		ufo_size &= ~7;
		txdp->Control_1 |= TXD_UFO_EN;
		txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
		txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
#ifdef __BIG_ENDIAN
		/* both variants do cpu_to_be64(be32_to_cpu(...)) */
		fifo->ufo_in_band_v[put_off] =
				(__force u64)skb_shinfo(skb)->ip6_frag_id;
#else
		fifo->ufo_in_band_v[put_off] =
				(__force u64)skb_shinfo(skb)->ip6_frag_id << 32;
#endif
		txdp->Host_Control = (unsigned long)fifo->ufo_in_band_v;
		txdp->Buffer_Pointer = pci_map_single(sp->pdev,
					fifo->ufo_in_band_v,
					sizeof(u64), PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(txdp->Buffer_Pointer))
			goto pci_map_failed;
		txdp++;
	}

	/* Map the linear part of the skb */
	txdp->Buffer_Pointer = pci_map_single
	    (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(txdp->Buffer_Pointer))
		goto pci_map_failed;

	/* Host_Control stashes the skb so the completion path can free it */
	txdp->Host_Control = (unsigned long) skb;
	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
	if (offload_type == SKB_GSO_UDP)
		txdp->Control_1 |= TXD_UFO_EN;

	frg_cnt = skb_shinfo(skb)->nr_frags;
	/* For fragmented SKB. */
	for (i = 0; i < frg_cnt; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		/* A '0' length fragment will be ignored */
		if (!frag->size)
			continue;
		txdp++;
		txdp->Buffer_Pointer = (u64) pci_map_page
		    (sp->pdev, frag->page, frag->page_offset,
		     frag->size, PCI_DMA_TODEVICE);
		txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
		if (offload_type == SKB_GSO_UDP)
			txdp->Control_1 |= TXD_UFO_EN;
	}
	txdp->Control_1 |= TXD_GATHER_CODE_LAST;

	if (offload_type == SKB_GSO_UDP)
		frg_cnt++; /* as Txd0 was used for inband header */

	/* Ring the doorbell: hand the descriptor list to the fifo */
	tx_fifo = mac_control->tx_FIFO_start[queue];
	val64 = fifo->list_info[put_off].list_phy_addr;
	writeq(val64, &tx_fifo->TxDL_Pointer);

	val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
		 TX_FIFO_LAST_LIST);
	if (offload_type)
		val64 |= TX_FIFO_SPECIAL_FUNC;

	writeq(val64, &tx_fifo->List_Control);

	/* Order the MMIO writes before releasing the lock */
	mmiowb();

	/* Advance the put pointer, wrapping at the end of the fifo */
	put_off++;
	if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
		put_off = 0;
	fifo->tx_curr_put_info.offset = put_off;

	/* Avoid "put" pointer going beyond "get" pointer */
	if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
		DBG_PRINT(TX_DBG,
			  "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
			  put_off, get_off);
		s2io_stop_tx_queue(sp, fifo->fifo_no);
	}
	mac_control->stats_info->sw_stat.mem_allocated += skb->truesize;
	dev->trans_start = jiffies;
	spin_unlock_irqrestore(&fifo->tx_lock, flags);

	/* NOTE(review): under MSI-X the completion handler is invoked
	 * synchronously here rather than from the interrupt path */
	if (sp->config.intr_type == MSI_X)
		tx_intr_handler(fifo);

	return 0;
pci_map_failed:
	/* DMA mapping failed: account, stop the queue and drop the skb */
	stats->pci_map_fail_cnt++;
	s2io_stop_tx_queue(sp, fifo->fifo_no);
	stats->mem_freed += skb->truesize;
	dev_kfree_skb(skb);
	spin_unlock_irqrestore(&fifo->tx_lock, flags);
	return 0;
}
4325
4326 static void
4327 s2io_alarm_handle(unsigned long data)
4328 {
4329         struct s2io_nic *sp = (struct s2io_nic *)data;
4330         struct net_device *dev = sp->dev;
4331
4332         s2io_handle_errors(dev);
4333         mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4334 }
4335
static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
{
	struct ring_info *ring = (struct ring_info *)dev_id;
	struct s2io_nic *sp = ring->nic;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	struct net_device *dev = sp->dev;

	/* Ignore interrupts that race with the card going down */
	if (unlikely(!is_s2io_card_up(sp)))
		return IRQ_HANDLED;

	if (sp->config.napi) {
		u8 __iomem *addr = NULL;
		u8 val8 = 0;

		/* Mask this ring's MSI-X vector until the NAPI poll has
		 * run.  Byte (7 - ring_no) of xmsi_mask_reg is written;
		 * NOTE(review): ring 0 writes 0x7f instead of 0xff,
		 * presumably to keep one other vector (the alarm vector?)
		 * unmasked - confirm against the Xframe register spec. */
		addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
		addr += (7 - ring->ring_no);
		val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
		writeb(val8, addr);
		val8 = readb(addr);	/* read back to flush the write */
		netif_rx_schedule(dev, &ring->napi);
	} else {
		/* Non-NAPI: service the ring and replenish Rx buffers now */
		rx_intr_handler(ring, 0);
		s2io_chk_rx_buffers(ring);
	}

	return IRQ_HANDLED;
}
4363
/* MSI-X alarm/Tx vector handler: services TXPIC and Tx traffic
 * interrupts for all fifos (dev_id points at the fifo array). */
static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
{
	int i;
	struct fifo_info *fifos = (struct fifo_info *)dev_id;
	struct s2io_nic *sp = fifos->nic;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	struct config_param *config  = &sp->config;
	u64 reason;

	if (unlikely(!is_s2io_card_up(sp)))
		return IRQ_NONE;

	reason = readq(&bar0->general_int_status);
	if (unlikely(reason == S2IO_MINUS_ONE))
		/* Nothing much can be done. Get out */
		return IRQ_HANDLED;

	if (reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)) {
		/* Mask all interrupts while we service Tx */
		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);

		if (reason & GEN_INTR_TXPIC)
			s2io_txpic_intr_handle(sp);

		if (reason & GEN_INTR_TXTRAFFIC)
			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);

		/* Reap completed descriptors on every fifo */
		for (i = 0; i < config->tx_fifo_num; i++)
			tx_intr_handler(&fifos[i]);

		/* Restore the interrupt mask; the readl flushes the write */
		writeq(sp->general_int_mask, &bar0->general_int_mask);
		readl(&bar0->general_int_status);
		return IRQ_HANDLED;
	}
	/* The interrupt was not raised by us */
	return IRQ_NONE;
}
4400
/*
 * s2io_txpic_intr_handle - service PIC/GPIO interrupts (link state changes).
 * @sp: device private structure.
 *
 * Reads the GPIO interrupt cause and updates the software link state,
 * the adapter enable bit and the LED accordingly.  The link-up and
 * link-down interrupts are masked/unmasked alternately so that only the
 * transition away from the current state can interrupt.
 */
static void s2io_txpic_intr_handle(struct s2io_nic *sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;

	val64 = readq(&bar0->pic_int_status);
	if (val64 & PIC_INT_GPIO) {
		val64 = readq(&bar0->gpio_int_reg);
		if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
		    (val64 & GPIO_INT_REG_LINK_UP)) {
			/*
			 * This is unstable state so clear both up/down
			 * interrupt and adapter to re-evaluate the link state.
			 */
			val64 |=  GPIO_INT_REG_LINK_DOWN;
			val64 |= GPIO_INT_REG_LINK_UP;
			writeq(val64, &bar0->gpio_int_reg);
			val64 = readq(&bar0->gpio_int_mask);
			/* Unmask both so whichever happens next is seen. */
			val64 &= ~(GPIO_INT_MASK_LINK_UP |
				   GPIO_INT_MASK_LINK_DOWN);
			writeq(val64, &bar0->gpio_int_mask);
		}
		else if (val64 & GPIO_INT_REG_LINK_UP) {
			/* NOTE(review): adapter_status result is unused —
			 * presumably read for its side effect; confirm. */
			val64 = readq(&bar0->adapter_status);
				/* Enable Adapter */
			val64 = readq(&bar0->adapter_control);
			val64 |= ADAPTER_CNTL_EN;
			writeq(val64, &bar0->adapter_control);
			val64 |= ADAPTER_LED_ON;
			writeq(val64, &bar0->adapter_control);
			if (!sp->device_enabled_once)
				sp->device_enabled_once = 1;

			s2io_link(sp, LINK_UP);
			/*
			 * unmask link down interrupt and mask link-up
			 * intr
			 */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_DOWN;
			val64 |= GPIO_INT_MASK_LINK_UP;
			writeq(val64, &bar0->gpio_int_mask);

		}else if (val64 & GPIO_INT_REG_LINK_DOWN) {
			/* NOTE(review): adapter_status result unused here
			 * as well; likely a flush/latch read — confirm. */
			val64 = readq(&bar0->adapter_status);
			s2io_link(sp, LINK_DOWN);
			/* Link is down so unmask link up interrupt */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_UP;
			val64 |= GPIO_INT_MASK_LINK_DOWN;
			writeq(val64, &bar0->gpio_int_mask);

			/* turn off LED */
			val64 = readq(&bar0->adapter_control);
			val64 = val64 &(~ADAPTER_LED_ON);
			writeq(val64, &bar0->adapter_control);
		}
	}
	/* Final read of gpio_int_mask; value discarded — presumably flushes
	 * the preceding posted writes. */
	val64 = readq(&bar0->gpio_int_mask);
}
4461
4462 /**
4463  *  do_s2io_chk_alarm_bit - Check for alarm and incrment the counter
4464  *  @value: alarm bits
4465  *  @addr: address value
4466  *  @cnt: counter variable
4467  *  Description: Check for alarm and increment the counter
4468  *  Return Value:
4469  *  1 - if alarm bit set
4470  *  0 - if alarm bit is not set
4471  */
4472 static int do_s2io_chk_alarm_bit(u64 value, void __iomem * addr,
4473                           unsigned long long *cnt)
4474 {
4475         u64 val64;
4476         val64 = readq(addr);
4477         if ( val64 & value ) {
4478                 writeq(val64, addr);
4479                 (*cnt)++;
4480                 return 1;
4481         }
4482         return 0;
4483
4484 }
4485
4486 /**
4487  *  s2io_handle_errors - Xframe error indication handler
4488  *  @nic: device private variable
4489  *  Description: Handle alarms such as loss of link, single or
4490  *  double ECC errors, critical and serious errors.
4491  *  Return Value:
4492  *  NONE
4493  */
4494 static void s2io_handle_errors(void * dev_id)
4495 {
4496         struct net_device *dev = (struct net_device *) dev_id;
4497         struct s2io_nic *sp = dev->priv;
4498         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4499         u64 temp64 = 0,val64=0;
4500         int i = 0;
4501
4502         struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4503         struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4504
4505         if (!is_s2io_card_up(sp))
4506                 return;
4507
4508         if (pci_channel_offline(sp->pdev))
4509                 return;
4510
4511         memset(&sw_stat->ring_full_cnt, 0,
4512                 sizeof(sw_stat->ring_full_cnt));
4513
4514         /* Handling the XPAK counters update */
4515         if(stats->xpak_timer_count < 72000) {
4516                 /* waiting for an hour */
4517                 stats->xpak_timer_count++;
4518         } else {
4519                 s2io_updt_xpak_counter(dev);
4520                 /* reset the count to zero */
4521                 stats->xpak_timer_count = 0;
4522         }
4523
4524         /* Handling link status change error Intr */
4525         if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4526                 val64 = readq(&bar0->mac_rmac_err_reg);
4527                 writeq(val64, &bar0->mac_rmac_err_reg);
4528                 if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4529                         schedule_work(&sp->set_link_task);
4530         }
4531
4532         /* In case of a serious error, the device will be Reset. */
4533         if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
4534                                 &sw_stat->serious_err_cnt))
4535                 goto reset;
4536
4537         /* Check for data parity error */
4538         if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
4539                                 &sw_stat->parity_err_cnt))
4540                 goto reset;
4541
4542         /* Check for ring full counter */
4543         if (sp->device_type == XFRAME_II_DEVICE) {
4544                 val64 = readq(&bar0->ring_bump_counter1);
4545                 for (i=0; i<4; i++) {
4546                         temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
4547                         temp64 >>= 64 - ((i+1)*16);
4548                         sw_stat->ring_full_cnt[i] += temp64;
4549                 }
4550
4551                 val64 = readq(&bar0->ring_bump_counter2);
4552                 for (i=0; i<4; i++) {
4553                         temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
4554                         temp64 >>= 64 - ((i+1)*16);
4555                          sw_stat->ring_full_cnt[i+4] += temp64;
4556                 }
4557         }
4558
4559         val64 = readq(&bar0->txdma_int_status);
4560         /*check for pfc_err*/
4561         if (val64 & TXDMA_PFC_INT) {
4562                 if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM|
4563                                 PFC_MISC_0_ERR | PFC_MISC_1_ERR|
4564                                 PFC_PCIX_ERR, &bar0->pfc_err_reg,
4565                                 &sw_stat->pfc_err_cnt))
4566                         goto reset;
4567                 do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR, &bar0->pfc_err_reg,
4568                                 &sw_stat->pfc_err_cnt);
4569         }
4570
4571         /*check for tda_err*/
4572         if (val64 & TXDMA_TDA_INT) {
4573                 if(do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
4574                                 TDA_SM1_ERR_ALARM, &bar0->tda_err_reg,
4575                                 &sw_stat->tda_err_cnt))
4576                         goto reset;
4577                 do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
4578                                 &bar0->tda_err_reg, &sw_stat->tda_err_cnt);
4579         }
4580         /*check for pcc_err*/
4581         if (val64 & TXDMA_PCC_INT) {
4582                 if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM
4583                                 | PCC_N_SERR | PCC_6_COF_OV_ERR
4584                                 | PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR
4585                                 | PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR
4586                                 | PCC_TXB_ECC_DB_ERR, &bar0->pcc_err_reg,
4587                                 &sw_stat->pcc_err_cnt))
4588                         goto reset;
4589                 do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
4590                                 &bar0->pcc_err_reg, &sw_stat->pcc_err_cnt);
4591         }
4592
4593         /*check for tti_err*/
4594         if (val64 & TXDMA_TTI_INT) {
4595                 if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM, &bar0->tti_err_reg,
4596                                 &sw_stat->tti_err_cnt))
4597                         goto reset;
4598                 do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
4599                                 &bar0->tti_err_reg, &sw_stat->tti_err_cnt);
4600         }
4601
4602         /*check for lso_err*/
4603         if (val64 & TXDMA_LSO_INT) {
4604                 if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT
4605                                 | LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4606                                 &bar0->lso_err_reg, &sw_stat->lso_err_cnt))
4607                         goto reset;
4608                 do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
4609                                 &bar0->lso_err_reg, &sw_stat->lso_err_cnt);
4610         }
4611
4612         /*check for tpa_err*/
4613         if (val64 & TXDMA_TPA_INT) {
4614                 if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM, &bar0->tpa_err_reg,
4615                         &sw_stat->tpa_err_cnt))
4616                         goto reset;
4617                 do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP, &bar0->tpa_err_reg,
4618                         &sw_stat->tpa_err_cnt);
4619         }
4620
4621         /*check for sm_err*/
4622         if (val64 & TXDMA_SM_INT) {
4623                 if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM, &bar0->sm_err_reg,
4624                         &sw_stat->sm_err_cnt))
4625                         goto reset;
4626         }
4627
4628         val64 = readq(&bar0->mac_int_status);
4629         if (val64 & MAC_INT_STATUS_TMAC_INT) {
4630                 if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
4631                                 &bar0->mac_tmac_err_reg,
4632                                 &sw_stat->mac_tmac_err_cnt))
4633                         goto reset;
4634                 do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR
4635                                 | TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
4636                                 &bar0->mac_tmac_err_reg,
4637                                 &sw_stat->mac_tmac_err_cnt);
4638         }
4639
4640         val64 = readq(&bar0->xgxs_int_status);
4641         if (val64 & XGXS_INT_STATUS_TXGXS) {
4642                 if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
4643                                 &bar0->xgxs_txgxs_err_reg,
4644                                 &sw_stat->xgxs_txgxs_err_cnt))
4645                         goto reset;
4646                 do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
4647                                 &bar0->xgxs_txgxs_err_reg,
4648                                 &sw_stat->xgxs_txgxs_err_cnt);
4649         }
4650
4651         val64 = readq(&bar0->rxdma_int_status);
4652         if (val64 & RXDMA_INT_RC_INT_M) {
4653                 if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR
4654                                 | RC_PRCn_SM_ERR_ALARM |RC_FTC_SM_ERR_ALARM,
4655                                 &bar0->rc_err_reg, &sw_stat->rc_err_cnt))
4656                         goto reset;
4657                 do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR
4658                                 | RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4659                                 &sw_stat->rc_err_cnt);
4660                 if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn
4661                                 | PRC_PCI_AB_F_WR_Rn, &bar0->prc_pcix_err_reg,
4662                                 &sw_stat->prc_pcix_err_cnt))
4663                         goto reset;
4664                 do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn | PRC_PCI_DP_WR_Rn
4665                                 | PRC_PCI_DP_F_WR_Rn, &bar0->prc_pcix_err_reg,
4666                                 &sw_stat->prc_pcix_err_cnt);
4667         }
4668
4669         if (val64 & RXDMA_INT_RPA_INT_M) {
4670                 if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
4671                                 &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt))
4672                         goto reset;
4673                 do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
4674                                 &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt);
4675         }
4676
4677         if (val64 & RXDMA_INT_RDA_INT_M) {
4678                 if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR
4679                                 | RDA_FRM_ECC_DB_N_AERR | RDA_SM1_ERR_ALARM
4680                                 | RDA_SM0_ERR_ALARM | RDA_RXD_ECC_DB_SERR,
4681                                 &bar0->rda_err_reg, &sw_stat->rda_err_cnt))
4682                         goto reset;
4683                 do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR | RDA_FRM_ECC_SG_ERR
4684                                 | RDA_MISC_ERR | RDA_PCIX_ERR,
4685                                 &bar0->rda_err_reg, &sw_stat->rda_err_cnt);
4686         }
4687
4688         if (val64 & RXDMA_INT_RTI_INT_M) {
4689                 if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM, &bar0->rti_err_reg,
4690                                 &sw_stat->rti_err_cnt))
4691                         goto reset;
4692                 do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
4693                                 &bar0->rti_err_reg, &sw_stat->rti_err_cnt);
4694         }
4695
4696         val64 = readq(&bar0->mac_int_status);
4697         if (val64 & MAC_INT_STATUS_RMAC_INT) {
4698                 if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
4699                                 &bar0->mac_rmac_err_reg,
4700                                 &sw_stat->mac_rmac_err_cnt))
4701                         goto reset;
4702                 do_s2io_chk_alarm_bit(RMAC_UNUSED_INT|RMAC_SINGLE_ECC_ERR|
4703                                 RMAC_DOUBLE_ECC_ERR, &bar0->mac_rmac_err_reg,
4704                                 &sw_stat->mac_rmac_err_cnt);
4705         }
4706
4707         val64 = readq(&bar0->xgxs_int_status);
4708         if (val64 & XGXS_INT_STATUS_RXGXS) {
4709                 if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
4710                                 &bar0->xgxs_rxgxs_err_reg,
4711                                 &sw_stat->xgxs_rxgxs_err_cnt))
4712                         goto reset;
4713         }
4714
4715         val64 = readq(&bar0->mc_int_status);
4716         if(val64 & MC_INT_STATUS_MC_INT) {
4717                 if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR, &bar0->mc_err_reg,
4718                                 &sw_stat->mc_err_cnt))
4719                         goto reset;
4720
4721                 /* Handling Ecc errors */
4722                 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
4723                         writeq(val64, &bar0->mc_err_reg);
4724                         if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4725                                 sw_stat->double_ecc_errs++;
4726                                 if (sp->device_type != XFRAME_II_DEVICE) {
4727                                         /*
4728                                          * Reset XframeI only if critical error
4729                                          */
4730                                         if (val64 &
4731                                                 (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4732                                                 MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4733                                                                 goto reset;
4734                                         }
4735                         } else
4736                                 sw_stat->single_ecc_errs++;
4737                 }
4738         }
4739         return;
4740
4741 reset:
4742         s2io_stop_all_tx_queue(sp);
4743         schedule_work(&sp->rst_timer_task);
4744         sw_stat->soft_reset_cnt++;
4745         return;
4746 }
4747
4748 /**
4749  *  s2io_isr - ISR handler of the device .
4750  *  @irq: the irq of the device.
4751  *  @dev_id: a void pointer to the dev structure of the NIC.
4752  *  Description:  This function is the ISR handler of the device. It
4753  *  identifies the reason for the interrupt and calls the relevant
4754  *  service routines. As a contongency measure, this ISR allocates the
4755  *  recv buffers, if their numbers are below the panic value which is
4756  *  presently set to 25% of the original number of rcv buffers allocated.
4757  *  Return value:
4758  *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
4759  *   IRQ_NONE: will be returned if interrupt is not from our device
4760  */
4761 static irqreturn_t s2io_isr(int irq, void *dev_id)
4762 {
4763         struct net_device *dev = (struct net_device *) dev_id;
4764         struct s2io_nic *sp = dev->priv;
4765         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4766         int i;
4767         u64 reason = 0;
4768         struct mac_info *mac_control;
4769         struct config_param *config;
4770
4771         /* Pretend we handled any irq's from a disconnected card */
4772         if (pci_channel_offline(sp->pdev))
4773                 return IRQ_NONE;
4774
4775         if (!is_s2io_card_up(sp))
4776                 return IRQ_NONE;
4777
4778         mac_control = &sp->mac_control;
4779         config = &sp->config;
4780
4781         /*
4782          * Identify the cause for interrupt and call the appropriate
4783          * interrupt handler. Causes for the interrupt could be;
4784          * 1. Rx of packet.
4785          * 2. Tx complete.
4786          * 3. Link down.
4787          */
4788         reason = readq(&bar0->general_int_status);
4789
4790         if (unlikely(reason == S2IO_MINUS_ONE) ) {
4791                 /* Nothing much can be done. Get out */
4792                 return IRQ_HANDLED;
4793         }
4794
4795         if (reason & (GEN_INTR_RXTRAFFIC |
4796                 GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC))
4797         {
4798                 writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4799
4800                 if (config->napi) {
4801                         if (reason & GEN_INTR_RXTRAFFIC) {
4802                                 netif_rx_schedule(dev, &sp->napi);
4803                                 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
4804                                 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4805                                 readl(&bar0->rx_traffic_int);
4806                         }
4807                 } else {
4808                         /*
4809                          * rx_traffic_int reg is an R1 register, writing all 1's
4810                          * will ensure that the actual interrupt causing bit
4811                          * get's cleared and hence a read can be avoided.
4812                          */
4813                         if (reason & GEN_INTR_RXTRAFFIC)
4814                                 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4815
4816                         for (i = 0; i < config->rx_ring_num; i++)
4817                                 rx_intr_handler(&mac_control->rings[i], 0);
4818                 }
4819
4820                 /*
4821                  * tx_traffic_int reg is an R1 register, writing all 1's
4822                  * will ensure that the actual interrupt causing bit get's
4823                  * cleared and hence a read can be avoided.
4824                  */
4825                 if (reason & GEN_INTR_TXTRAFFIC)
4826                         writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4827
4828                 for (i = 0; i < config->tx_fifo_num; i++)
4829                         tx_intr_handler(&mac_control->fifos[i]);
4830
4831                 if (reason & GEN_INTR_TXPIC)
4832                         s2io_txpic_intr_handle(sp);
4833
4834                 /*
4835                  * Reallocate the buffers from the interrupt handler itself.
4836                  */
4837                 if (!config->napi) {
4838                         for (i = 0; i < config->rx_ring_num; i++)
4839                                 s2io_chk_rx_buffers(&mac_control->rings[i]);
4840                 }
4841                 writeq(sp->general_int_mask, &bar0->general_int_mask);
4842                 readl(&bar0->general_int_status);
4843
4844                 return IRQ_HANDLED;
4845
4846         }
4847         else if (!reason) {
4848                 /* The interrupt was not raised by us */
4849                 return IRQ_NONE;
4850         }
4851
4852         return IRQ_HANDLED;
4853 }
4854
4855 /**
4856  * s2io_updt_stats -
4857  */
4858 static void s2io_updt_stats(struct s2io_nic *sp)
4859 {
4860         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4861         u64 val64;
4862         int cnt = 0;
4863
4864         if (is_s2io_card_up(sp)) {
4865                 /* Apprx 30us on a 133 MHz bus */
4866                 val64 = SET_UPDT_CLICKS(10) |
4867                         STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4868                 writeq(val64, &bar0->stat_cfg);
4869                 do {
4870                         udelay(100);
4871                         val64 = readq(&bar0->stat_cfg);
4872                         if (!(val64 & s2BIT(0)))
4873                                 break;
4874                         cnt++;
4875                         if (cnt == 5)
4876                                 break; /* Updt failed */
4877                 } while(1);
4878         }
4879 }
4880
4881 /**
4882  *  s2io_get_stats - Updates the device statistics structure.
4883  *  @dev : pointer to the device structure.
4884  *  Description:
4885  *  This function updates the device statistics structure in the s2io_nic
4886  *  structure and returns a pointer to the same.
4887  *  Return value:
4888  *  pointer to the updated net_device_stats structure.
4889  */
4890
4891 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4892 {
4893         struct s2io_nic *sp = dev->priv;
4894         struct mac_info *mac_control;
4895         struct config_param *config;
4896         int i;
4897
4898
4899         mac_control = &sp->mac_control;
4900         config = &sp->config;
4901
4902         /* Configure Stats for immediate updt */
4903         s2io_updt_stats(sp);
4904
4905         sp->stats.tx_packets =
4906                 le32_to_cpu(mac_control->stats_info->tmac_frms);
4907         sp->stats.tx_errors =
4908                 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4909         sp->stats.rx_errors =
4910                 le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
4911         sp->stats.multicast =
4912                 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
4913         sp->stats.rx_length_errors =
4914                 le64_to_cpu(mac_control->stats_info->rmac_long_frms);
4915
4916         /* collect per-ring rx_packets and rx_bytes */
4917         sp->stats.rx_packets = sp->stats.rx_bytes = 0;
4918         for (i = 0; i < config->rx_ring_num; i++) {
4919                 sp->stats.rx_packets += mac_control->rings[i].rx_packets;
4920                 sp->stats.rx_bytes += mac_control->rings[i].rx_bytes;
4921         }
4922
4923         return (&sp->stats);
4924 }
4925
4926 /**
4927  *  s2io_set_multicast - entry point for multicast address enable/disable.
4928  *  @dev : pointer to the device structure
4929  *  Description:
4930  *  This function is a driver entry point which gets called by the kernel
4931  *  whenever multicast addresses must be enabled/disabled. This also gets
4932  *  called to set/reset promiscuous mode. Depending on the deivce flag, we
4933  *  determine, if multicast address must be enabled or if promiscuous mode
4934  *  is to be disabled etc.
4935  *  Return value:
4936  *  void.
4937  */
4938
4939 static void s2io_set_multicast(struct net_device *dev)
4940 {
4941         int i, j, prev_cnt;
4942         struct dev_mc_list *mclist;
4943         struct s2io_nic *sp = dev->priv;
4944         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4945         u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4946             0xfeffffffffffULL;
4947         u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
4948         void __iomem *add;
4949         struct config_param *config = &sp->config;
4950
4951         if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4952                 /*  Enable all Multicast addresses */
4953                 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4954                        &bar0->rmac_addr_data0_mem);
4955                 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4956                        &bar0->rmac_addr_data1_mem);
4957                 val64 = RMAC_ADDR_CMD_MEM_WE |
4958                     RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4959                     RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
4960                 writeq(val64, &bar0->rmac_addr_cmd_mem);
4961                 /* Wait till command completes */
4962                 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4963                                         RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4964                                         S2IO_BIT_RESET);
4965
4966                 sp->m_cast_flg = 1;
4967                 sp->all_multi_pos = config->max_mc_addr - 1;
4968         } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4969                 /*  Disable all Multicast addresses */
4970                 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4971                        &bar0->rmac_addr_data0_mem);
4972                 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4973                        &bar0->rmac_addr_data1_mem);
4974                 val64 = RMAC_ADDR_CMD_MEM_WE |
4975                     RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4976                     RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4977                 writeq(val64, &bar0->rmac_addr_cmd_mem);
4978                 /* Wait till command completes */
4979                 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4980                                         RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4981                                         S2IO_BIT_RESET);
4982
4983                 sp->m_cast_flg = 0;
4984                 sp->all_multi_pos = 0;
4985         }
4986
4987         if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4988                 /*  Put the NIC into promiscuous mode */
4989                 add = &bar0->mac_cfg;
4990                 val64 = readq(&bar0->mac_cfg);
4991                 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
4992
4993                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4994                 writel((u32) val64, add);
4995                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4996                 writel((u32) (val64 >> 32), (add + 4));
4997
4998                 if (vlan_tag_strip != 1) {
4999                         val64 = readq(&bar0->rx_pa_cfg);
5000                         val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
5001                         writeq(val64, &bar0->rx_pa_cfg);
5002                         vlan_strip_flag = 0;
5003                 }
5004
5005                 val64 = readq(&bar0->mac_cfg);
5006                 sp->promisc_flg = 1;
5007                 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
5008                           dev->name);
5009         } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
5010                 /*  Remove the NIC from promiscuous mode */
5011                 add = &bar0->mac_cfg;
5012                 val64 = readq(&bar0->mac_cfg);
5013                 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
5014
5015                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5016                 writel((u32) val64, add);
5017                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5018                 writel((u32) (val64 >> 32), (add + 4));
5019
5020                 if (vlan_tag_strip != 0) {
5021                         val64 = readq(&bar0->rx_pa_cfg);
5022                         val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
5023                         writeq(val64, &bar0->rx_pa_cfg);
5024                         vlan_strip_flag = 1;
5025                 }
5026
5027                 val64 = readq(&bar0->mac_cfg);
5028                 sp->promisc_flg = 0;
5029                 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
5030                           dev->name);
5031         }
5032
5033         /*  Update individual M_CAST address list */
5034         if ((!sp->m_cast_flg) && dev->mc_count) {
5035                 if (dev->mc_count >
5036                     (config->max_mc_addr - config->max_mac_addr)) {
5037                         DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
5038                                   dev->name);
5039                         DBG_PRINT(ERR_DBG, "can be added, please enable ");
5040                         DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
5041                         return;
5042                 }
5043
5044                 prev_cnt = sp->mc_addr_count;
5045                 sp->mc_addr_count = dev->mc_count;
5046
5047                 /* Clear out the previous list of Mc in the H/W. */
5048                 for (i = 0; i < prev_cnt; i++) {
5049                         writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
5050                                &bar0->rmac_addr_data0_mem);
5051                         writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5052                                 &bar0->rmac_addr_data1_mem);
5053                         val64 = RMAC_ADDR_CMD_MEM_WE |
5054                             RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5055                             RMAC_ADDR_CMD_MEM_OFFSET
5056                             (config->mc_start_offset + i);
5057                         writeq(val64, &bar0->rmac_addr_cmd_mem);
5058
5059                         /* Wait for command completes */
5060                         if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5061                                         RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5062                                         S2IO_BIT_RESET)) {
5063                                 DBG_PRINT(ERR_DBG, "%s: Adding ",
5064                                           dev->name);
5065                                 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
5066                                 return;
5067                         }
5068                 }
5069
5070                 /* Create the new Rx filter list and update the same in H/W. */
5071                 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
5072                      i++, mclist = mclist->next) {
5073                         memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
5074                                ETH_ALEN);
5075                         mac_addr = 0;
5076                         for (j = 0; j < ETH_ALEN; j++) {
5077                                 mac_addr |= mclist->dmi_addr[j];
5078                                 mac_addr <<= 8;
5079                         }
5080                         mac_addr >>= 8;
5081                         writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
5082                                &bar0->rmac_addr_data0_mem);
5083                         writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5084                                 &bar0->rmac_addr_data1_mem);
5085                         val64 = RMAC_ADDR_CMD_MEM_WE |
5086                             RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5087                             RMAC_ADDR_CMD_MEM_OFFSET
5088                             (i + config->mc_start_offset);
5089                         writeq(val64, &bar0->rmac_addr_cmd_mem);
5090
5091                         /* Wait for command completes */
5092                         if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5093                                         RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5094                                         S2IO_BIT_RESET)) {
5095                                 DBG_PRINT(ERR_DBG, "%s: Adding ",
5096                                           dev->name);
5097                                 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
5098                                 return;
5099                         }
5100                 }
5101         }
5102 }
5103
5104 /* read from CAM unicast & multicast addresses and store it in
5105  * def_mac_addr structure
5106  */
5107 void do_s2io_store_unicast_mc(struct s2io_nic *sp)
5108 {
5109         int offset;
5110         u64 mac_addr = 0x0;
5111         struct config_param *config = &sp->config;
5112
5113         /* store unicast & multicast mac addresses */
5114         for (offset = 0; offset < config->max_mc_addr; offset++) {
5115                 mac_addr = do_s2io_read_unicast_mc(sp, offset);
5116                 /* if read fails disable the entry */
5117                 if (mac_addr == FAILURE)
5118                         mac_addr = S2IO_DISABLE_MAC_ENTRY;
5119                 do_s2io_copy_mac_addr(sp, offset, mac_addr);
5120         }
5121 }
5122
5123 /* restore unicast & multicast MAC to CAM from def_mac_addr structure */
5124 static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
5125 {
5126         int offset;
5127         struct config_param *config = &sp->config;
5128         /* restore unicast mac address */
5129         for (offset = 0; offset < config->max_mac_addr; offset++)
5130                 do_s2io_prog_unicast(sp->dev,
5131                         sp->def_mac_addr[offset].mac_addr);
5132
5133         /* restore multicast mac address */
5134         for (offset = config->mc_start_offset;
5135                 offset < config->max_mc_addr; offset++)
5136                 do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
5137 }
5138
5139 /* add a multicast MAC address to CAM */
5140 static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
5141 {
5142         int i;
5143         u64 mac_addr = 0;
5144         struct config_param *config = &sp->config;
5145
5146         for (i = 0; i < ETH_ALEN; i++) {
5147                 mac_addr <<= 8;
5148                 mac_addr |= addr[i];
5149         }
5150         if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
5151                 return SUCCESS;
5152
5153         /* check if the multicast mac already preset in CAM */
5154         for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
5155                 u64 tmp64;
5156                 tmp64 = do_s2io_read_unicast_mc(sp, i);
5157                 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5158                         break;
5159
5160                 if (tmp64 == mac_addr)
5161                         return SUCCESS;
5162         }
5163         if (i == config->max_mc_addr) {
5164                 DBG_PRINT(ERR_DBG,
5165                         "CAM full no space left for multicast MAC\n");
5166                 return FAILURE;
5167         }
5168         /* Update the internal structure with this new mac address */
5169         do_s2io_copy_mac_addr(sp, i, mac_addr);
5170
5171         return (do_s2io_add_mac(sp, mac_addr, i));
5172 }
5173
5174 /* add MAC address to CAM */
5175 static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
5176 {
5177         u64 val64;
5178         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5179
5180         writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
5181                 &bar0->rmac_addr_data0_mem);
5182
5183         val64 =
5184                 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5185                 RMAC_ADDR_CMD_MEM_OFFSET(off);
5186         writeq(val64, &bar0->rmac_addr_cmd_mem);
5187
5188         /* Wait till command completes */
5189         if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5190                 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5191                 S2IO_BIT_RESET)) {
5192                 DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
5193                 return FAILURE;
5194         }
5195         return SUCCESS;
5196 }
5197 /* deletes a specified unicast/multicast mac entry from CAM */
5198 static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
5199 {
5200         int offset;
5201         u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
5202         struct config_param *config = &sp->config;
5203
5204         for (offset = 1;
5205                 offset < config->max_mc_addr; offset++) {
5206                 tmp64 = do_s2io_read_unicast_mc(sp, offset);
5207                 if (tmp64 == addr) {
5208                         /* disable the entry by writing  0xffffffffffffULL */
5209                         if (do_s2io_add_mac(sp, dis_addr, offset) ==  FAILURE)
5210                                 return FAILURE;
5211                         /* store the new mac list from CAM */
5212                         do_s2io_store_unicast_mc(sp);
5213                         return SUCCESS;
5214                 }
5215         }
5216         DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
5217                         (unsigned long long)addr);
5218         return FAILURE;
5219 }
5220
5221 /* read mac entries from CAM */
5222 static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
5223 {
5224         u64 tmp64 = 0xffffffffffff0000ULL, val64;
5225         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5226
5227         /* read mac addr */
5228         val64 =
5229                 RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5230                 RMAC_ADDR_CMD_MEM_OFFSET(offset);
5231         writeq(val64, &bar0->rmac_addr_cmd_mem);
5232
5233         /* Wait till command completes */
5234         if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5235                 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5236                 S2IO_BIT_RESET)) {
5237                 DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
5238                 return FAILURE;
5239         }
5240         tmp64 = readq(&bar0->rmac_addr_data0_mem);
5241         return (tmp64 >> 16);
5242 }
5243
5244 /**
5245  * s2io_set_mac_addr driver entry point
5246  */
5247
5248 static int s2io_set_mac_addr(struct net_device *dev, void *p)
5249 {
5250         struct sockaddr *addr = p;
5251
5252         if (!is_valid_ether_addr(addr->sa_data))
5253                 return -EINVAL;
5254
5255         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5256
5257         /* store the MAC address in CAM */
5258         return (do_s2io_prog_unicast(dev, dev->dev_addr));
5259 }
5260 /**
5261  *  do_s2io_prog_unicast - Programs the Xframe mac address
5262  *  @dev : pointer to the device structure.
5263  *  @addr: a uchar pointer to the new mac address which is to be set.
5264  *  Description : This procedure will program the Xframe to receive
5265  *  frames with new Mac Address
5266  *  Return value: SUCCESS on success and an appropriate (-)ve integer
5267  *  as defined in errno.h file on failure.
5268  */
5269
5270 static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
5271 {
5272         struct s2io_nic *sp = dev->priv;
5273         register u64 mac_addr = 0, perm_addr = 0;
5274         int i;
5275         u64 tmp64;
5276         struct config_param *config = &sp->config;
5277
5278         /*
5279         * Set the new MAC address as the new unicast filter and reflect this
5280         * change on the device address registered with the OS. It will be
5281         * at offset 0.
5282         */
5283         for (i = 0; i < ETH_ALEN; i++) {
5284                 mac_addr <<= 8;
5285                 mac_addr |= addr[i];
5286                 perm_addr <<= 8;
5287                 perm_addr |= sp->def_mac_addr[0].mac_addr[i];
5288         }
5289
5290         /* check if the dev_addr is different than perm_addr */
5291         if (mac_addr == perm_addr)
5292                 return SUCCESS;
5293
5294         /* check if the mac already preset in CAM */
5295         for (i = 1; i < config->max_mac_addr; i++) {
5296                 tmp64 = do_s2io_read_unicast_mc(sp, i);
5297                 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5298                         break;
5299
5300                 if (tmp64 == mac_addr) {
5301                         DBG_PRINT(INFO_DBG,
5302                         "MAC addr:0x%llx already present in CAM\n",
5303                         (unsigned long long)mac_addr);
5304                         return SUCCESS;
5305                 }
5306         }
5307         if (i == config->max_mac_addr) {
5308                 DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
5309                 return FAILURE;
5310         }
5311         /* Update the internal structure with this new mac address */
5312         do_s2io_copy_mac_addr(sp, i, mac_addr);
5313         return (do_s2io_add_mac(sp, mac_addr, i));
5314 }
5315
5316 /**
5317  * s2io_ethtool_sset - Sets different link parameters.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
5319  * @info: pointer to the structure with parameters given by ethtool to set
5320  * link information.
5321  * Description:
5322  * The function sets different link parameters provided by the user onto
5323  * the NIC.
5324  * Return value:
5325  * 0 on success.
5326 */
5327
5328 static int s2io_ethtool_sset(struct net_device *dev,
5329                              struct ethtool_cmd *info)
5330 {
5331         struct s2io_nic *sp = dev->priv;
5332         if ((info->autoneg == AUTONEG_ENABLE) ||
5333             (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
5334                 return -EINVAL;
5335         else {
5336                 s2io_close(sp->dev);
5337                 s2io_open(sp->dev);
5338         }
5339
5340         return 0;
5341 }
5342
5343 /**
 * s2io_ethtool_gset - Return link specific information.
5345  * @sp : private member of the device structure, pointer to the
5346  *      s2io_nic structure.
5347  * @info : pointer to the structure with parameters given by ethtool
5348  * to return link information.
5349  * Description:
5350  * Returns link specific information like speed, duplex etc.. to ethtool.
5351  * Return value :
5352  * return 0 on success.
5353  */
5354
5355 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
5356 {
5357         struct s2io_nic *sp = dev->priv;
5358         info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5359         info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5360         info->port = PORT_FIBRE;
5361
5362         /* info->transceiver */
5363         info->transceiver = XCVR_EXTERNAL;
5364
5365         if (netif_carrier_ok(sp->dev)) {
5366                 info->speed = 10000;
5367                 info->duplex = DUPLEX_FULL;
5368         } else {
5369                 info->speed = -1;
5370                 info->duplex = -1;
5371         }
5372
5373         info->autoneg = AUTONEG_DISABLE;
5374         return 0;
5375 }
5376
5377 /**
5378  * s2io_ethtool_gdrvinfo - Returns driver specific information.
5379  * @sp : private member of the device structure, which is a pointer to the
5380  * s2io_nic structure.
5381  * @info : pointer to the structure with parameters given by ethtool to
5382  * return driver information.
5383  * Description:
 * Returns driver specific information like name, version etc.. to ethtool.
5385  * Return value:
5386  *  void
5387  */
5388
5389 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5390                                   struct ethtool_drvinfo *info)
5391 {
5392         struct s2io_nic *sp = dev->priv;
5393
5394         strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
5395         strncpy(info->version, s2io_driver_version, sizeof(info->version));
5396         strncpy(info->fw_version, "", sizeof(info->fw_version));
5397         strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
5398         info->regdump_len = XENA_REG_SPACE;
5399         info->eedump_len = XENA_EEPROM_SPACE;
5400 }
5401
5402 /**
 *  s2io_ethtool_gregs - dumps the entire space of Xframe into the buffer.
5404  *  @sp: private member of the device structure, which is a pointer to the
5405  *  s2io_nic structure.
5406  *  @regs : pointer to the structure with parameters given by ethtool for
5407  *  dumping the registers.
 *  @reg_space: The input argument into which all the registers are dumped.
5409  *  Description:
5410  *  Dumps the entire register space of xFrame NIC into the user given
5411  *  buffer area.
5412  * Return value :
5413  * void .
5414 */
5415
5416 static void s2io_ethtool_gregs(struct net_device *dev,
5417                                struct ethtool_regs *regs, void *space)
5418 {
5419         int i;
5420         u64 reg;
5421         u8 *reg_space = (u8 *) space;
5422         struct s2io_nic *sp = dev->priv;
5423
5424         regs->len = XENA_REG_SPACE;
5425         regs->version = sp->pdev->subsystem_device;
5426
5427         for (i = 0; i < regs->len; i += 8) {
5428                 reg = readq(sp->bar0 + i);
5429                 memcpy((reg_space + i), &reg, 8);
5430         }
5431 }
5432
5433 /**
5434  *  s2io_phy_id  - timer function that alternates adapter LED.
5435  *  @data : address of the private member of the device structure, which
5436  *  is a pointer to the s2io_nic structure, provided as an u32.
5437  * Description: This is actually the timer function that alternates the
5438  * adapter LED bit of the adapter control bit to set/reset every time on
 * invocation. The timer is set for 1/2 a second, hence the NIC blinks
5440  *  once every second.
5441 */
5442 static void s2io_phy_id(unsigned long data)
5443 {
5444         struct s2io_nic *sp = (struct s2io_nic *) data;
5445         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5446         u64 val64 = 0;
5447         u16 subid;
5448
5449         subid = sp->pdev->subsystem_device;
5450         if ((sp->device_type == XFRAME_II_DEVICE) ||
5451                    ((subid & 0xFF) >= 0x07)) {
5452                 val64 = readq(&bar0->gpio_control);
5453                 val64 ^= GPIO_CTRL_GPIO_0;
5454                 writeq(val64, &bar0->gpio_control);
5455         } else {
5456                 val64 = readq(&bar0->adapter_control);
5457                 val64 ^= ADAPTER_LED_ON;
5458                 writeq(val64, &bar0->adapter_control);
5459         }
5460
5461         mod_timer(&sp->id_timer, jiffies + HZ / 2);
5462 }
5463
5464 /**
5465  * s2io_ethtool_idnic - To physically identify the nic on the system.
5466  * @sp : private member of the device structure, which is a pointer to the
5467  * s2io_nic structure.
5468  * @id : pointer to the structure with identification parameters given by
5469  * ethtool.
5470  * Description: Used to physically identify the NIC on the system.
5471  * The Link LED will blink for a time specified by the user for
5472  * identification.
5473  * NOTE: The Link has to be Up to be able to blink the LED. Hence
5474  * identification is possible only if it's link is up.
5475  * Return value:
5476  * int , returns 0 on success
5477  */
5478
5479 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
5480 {
5481         u64 val64 = 0, last_gpio_ctrl_val;
5482         struct s2io_nic *sp = dev->priv;
5483         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5484         u16 subid;
5485
5486         subid = sp->pdev->subsystem_device;
5487         last_gpio_ctrl_val = readq(&bar0->gpio_control);
5488         if ((sp->device_type == XFRAME_I_DEVICE) &&
5489                 ((subid & 0xFF) < 0x07)) {
5490                 val64 = readq(&bar0->adapter_control);
5491                 if (!(val64 & ADAPTER_CNTL_EN)) {
5492                         printk(KERN_ERR
5493                                "Adapter Link down, cannot blink LED\n");
5494                         return -EFAULT;
5495                 }
5496         }
5497         if (sp->id_timer.function == NULL) {
5498                 init_timer(&sp->id_timer);
5499                 sp->id_timer.function = s2io_phy_id;
5500                 sp->id_timer.data = (unsigned long) sp;
5501         }
5502         mod_timer(&sp->id_timer, jiffies);
5503         if (data)
5504                 msleep_interruptible(data * HZ);
5505         else
5506                 msleep_interruptible(MAX_FLICKER_TIME);
5507         del_timer_sync(&sp->id_timer);
5508
5509         if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
5510                 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
5511                 last_gpio_ctrl_val = readq(&bar0->gpio_control);
5512         }
5513
5514         return 0;
5515 }
5516
5517 static void s2io_ethtool_gringparam(struct net_device *dev,
5518                                     struct ethtool_ringparam *ering)
5519 {
5520         struct s2io_nic *sp = dev->priv;
5521         int i,tx_desc_count=0,rx_desc_count=0;
5522
5523         if (sp->rxd_mode == RXD_MODE_1)
5524                 ering->rx_max_pending = MAX_RX_DESC_1;
5525         else if (sp->rxd_mode == RXD_MODE_3B)
5526                 ering->rx_max_pending = MAX_RX_DESC_2;
5527
5528         ering->tx_max_pending = MAX_TX_DESC;
5529         for (i = 0 ; i < sp->config.tx_fifo_num ; i++)
5530                 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5531
5532         DBG_PRINT(INFO_DBG,"\nmax txds : %d\n",sp->config.max_txds);
5533         ering->tx_pending = tx_desc_count;
5534         rx_desc_count = 0;
5535         for (i = 0 ; i < sp->config.rx_ring_num ; i++)
5536                 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5537
5538         ering->rx_pending = rx_desc_count;
5539
55