[netdrvr] Trim trailing whitespace for several drivers
[linux-2.6.git] / drivers / net / s2io.c
1 /************************************************************************
2  * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3  * Copyright(c) 2002-2007 Neterion Inc.
4
5  * This software may be used and distributed according to the terms of
6  * the GNU General Public License (GPL), incorporated herein by reference.
7  * Drivers based on or derived from this code fall under the GPL and must
8  * retain the authorship, copyright and license notice.  This file is not
9  * a complete program and may only be used when the entire operating
10  * system is licensed under the GPL.
11  * See the file COPYING in this distribution for more information.
12  *
13  * Credits:
14  * Jeff Garzik          : For pointing out the improper error condition
15  *                        check in the s2io_xmit routine and also some
16  *                        issues in the Tx watch dog function. Also for
17  *                        patiently answering all those innumerable
18  *                        questions regarding the 2.6 porting issues.
19  * Stephen Hemminger    : Providing proper 2.6 porting mechanism for some
20  *                        macros available only in 2.6 Kernel.
21  * Francois Romieu      : For pointing out all code part that were
22  *                        deprecated and also styling related comments.
23  * Grant Grundler       : For helping me get rid of some Architecture
24  *                        dependent code.
25  * Christopher Hellwig  : Some more 2.6 specific issues in the driver.
26  *
27  * The module loadable parameters that are supported by the driver and a brief
28  * explanation of all the variables.
29  *
30  * rx_ring_num : This can be used to program the number of receive rings used
31  * in the driver.
32  * rx_ring_sz: This defines the number of receive blocks each ring can have.
33  *     This is also an array of size 8.
34  * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35  *              values are 1, 2.
36  * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
37  * tx_fifo_len: This too is an array of 8. Each element defines the number of
38  * Tx descriptors that can be associated with each corresponding FIFO.
39  * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40  *     2(MSI_X). Default value is '2(MSI_X)'
41  * lro_enable: Specifies whether to enable Large Receive Offload (LRO) or not.
42  *     Possible values '1' for enable '0' for disable. Default is '0'
43  * lro_max_pkts: This parameter defines maximum number of packets can be
44  *     aggregated as a single large packet
45  * napi: This parameter used to enable/disable NAPI (polling Rx)
46  *     Possible values '1' for enable and '0' for disable. Default is '1'
47  * ufo: This parameter used to enable/disable UDP Fragmentation Offload(UFO)
48  *      Possible values '1' for enable and '0' for disable. Default is '0'
49  * vlan_tag_strip: This can be used to enable or disable vlan stripping.
50  *                 Possible values '1' for enable , '0' for disable.
51  *                 Default is '2' - which means disable in promisc mode
52  *                 and enable in non-promiscuous mode.
53  * multiq: This parameter used to enable/disable MULTIQUEUE support.
54  *      Possible values '1' for enable and '0' for disable. Default is '0'
55  ************************************************************************/
56
57 #include <linux/module.h>
58 #include <linux/types.h>
59 #include <linux/errno.h>
60 #include <linux/ioport.h>
61 #include <linux/pci.h>
62 #include <linux/dma-mapping.h>
63 #include <linux/kernel.h>
64 #include <linux/netdevice.h>
65 #include <linux/etherdevice.h>
66 #include <linux/skbuff.h>
67 #include <linux/init.h>
68 #include <linux/delay.h>
69 #include <linux/stddef.h>
70 #include <linux/ioctl.h>
71 #include <linux/timex.h>
72 #include <linux/ethtool.h>
73 #include <linux/workqueue.h>
74 #include <linux/if_vlan.h>
75 #include <linux/ip.h>
76 #include <linux/tcp.h>
77 #include <net/tcp.h>
78
79 #include <asm/system.h>
80 #include <asm/uaccess.h>
81 #include <asm/io.h>
82 #include <asm/div64.h>
83 #include <asm/irq.h>
84
85 /* local include */
86 #include "s2io.h"
87 #include "s2io-regs.h"
88
89 #define DRV_VERSION "2.0.26.23"
90
91 /* S2io Driver name & version. */
92 static char s2io_driver_name[] = "Neterion";
93 static char s2io_driver_version[] = DRV_VERSION;
94
95 static int rxd_size[2] = {32,48};
96 static int rxd_count[2] = {127,85};
97
98 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
99 {
100         int ret;
101
102         ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
103                 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
104
105         return ret;
106 }
107
/*
 * Cards with following subsystem_id have a link state indication
 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
 * Macro below identifies these cards given the subsystem_id.
 *
 * Arguments and the full ternary expansion are parenthesized so the
 * macro expands correctly with expression arguments and inside larger
 * expressions.
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
        (((dev_type) == XFRAME_I_DEVICE) ?                      \
                (((((subid) >= 0x600B) && ((subid) <= 0x600D)) || \
                  (((subid) >= 0x640B) && ((subid) <= 0x640D))) ? 1 : 0) : 0)

/* Link is up only when neither the remote- nor the local-fault bit is set. */
#define LINK_IS_UP(val64) (!((val64) & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
                                        ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
120
121 static inline int is_s2io_card_up(const struct s2io_nic * sp)
122 {
123         return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
124 }
125
/* Ethtool related variables and Macros. */

/* Self-test names reported through ethtool -t (online/offline tests). */
static char s2io_gstrings[][ETH_GSTRING_LEN] = {
        "Register test\t(offline)",
        "Eeprom test\t(offline)",
        "Link test\t(online)",
        "RLDRAM test\t(offline)",
        "BIST Test\t(offline)"
};

/*
 * Hardware statistics keys common to Xframe I and Xframe II adapters,
 * reported via ethtool -S.  Order must match the values copied out in
 * the get_ethtool_stats handler.
 */
static char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
        {"tmac_frms"},
        {"tmac_data_octets"},
        {"tmac_drop_frms"},
        {"tmac_mcst_frms"},
        {"tmac_bcst_frms"},
        {"tmac_pause_ctrl_frms"},
        {"tmac_ttl_octets"},
        {"tmac_ucst_frms"},
        {"tmac_nucst_frms"},
        {"tmac_any_err_frms"},
        {"tmac_ttl_less_fb_octets"},
        {"tmac_vld_ip_octets"},
        {"tmac_vld_ip"},
        {"tmac_drop_ip"},
        {"tmac_icmp"},
        {"tmac_rst_tcp"},
        {"tmac_tcp"},
        {"tmac_udp"},
        {"rmac_vld_frms"},
        {"rmac_data_octets"},
        {"rmac_fcs_err_frms"},
        {"rmac_drop_frms"},
        {"rmac_vld_mcst_frms"},
        {"rmac_vld_bcst_frms"},
        {"rmac_in_rng_len_err_frms"},
        {"rmac_out_rng_len_err_frms"},
        {"rmac_long_frms"},
        {"rmac_pause_ctrl_frms"},
        {"rmac_unsup_ctrl_frms"},
        {"rmac_ttl_octets"},
        {"rmac_accepted_ucst_frms"},
        {"rmac_accepted_nucst_frms"},
        {"rmac_discarded_frms"},
        {"rmac_drop_events"},
        {"rmac_ttl_less_fb_octets"},
        {"rmac_ttl_frms"},
        {"rmac_usized_frms"},
        {"rmac_osized_frms"},
        {"rmac_frag_frms"},
        {"rmac_jabber_frms"},
        {"rmac_ttl_64_frms"},
        {"rmac_ttl_65_127_frms"},
        {"rmac_ttl_128_255_frms"},
        {"rmac_ttl_256_511_frms"},
        {"rmac_ttl_512_1023_frms"},
        {"rmac_ttl_1024_1518_frms"},
        {"rmac_ip"},
        {"rmac_ip_octets"},
        {"rmac_hdr_err_ip"},
        {"rmac_drop_ip"},
        {"rmac_icmp"},
        {"rmac_tcp"},
        {"rmac_udp"},
        {"rmac_err_drp_udp"},
        {"rmac_xgmii_err_sym"},
        {"rmac_frms_q0"},
        {"rmac_frms_q1"},
        {"rmac_frms_q2"},
        {"rmac_frms_q3"},
        {"rmac_frms_q4"},
        {"rmac_frms_q5"},
        {"rmac_frms_q6"},
        {"rmac_frms_q7"},
        {"rmac_full_q0"},
        {"rmac_full_q1"},
        {"rmac_full_q2"},
        {"rmac_full_q3"},
        {"rmac_full_q4"},
        {"rmac_full_q5"},
        {"rmac_full_q6"},
        {"rmac_full_q7"},
        {"rmac_pause_cnt"},
        {"rmac_xgmii_data_err_cnt"},
        {"rmac_xgmii_ctrl_err_cnt"},
        {"rmac_accepted_ip"},
        {"rmac_err_tcp"},
        {"rd_req_cnt"},
        {"new_rd_req_cnt"},
        {"new_rd_req_rtry_cnt"},
        {"rd_rtry_cnt"},
        {"wr_rtry_rd_ack_cnt"},
        {"wr_req_cnt"},
        {"new_wr_req_cnt"},
        {"new_wr_req_rtry_cnt"},
        {"wr_rtry_cnt"},
        {"wr_disc_cnt"},
        {"rd_rtry_wr_ack_cnt"},
        {"txp_wr_cnt"},
        {"txd_rd_cnt"},
        {"txd_wr_cnt"},
        {"rxd_rd_cnt"},
        {"rxd_wr_cnt"},
        {"txf_rd_cnt"},
        {"rxf_wr_cnt"}
};

/* Additional hardware statistics available only on Xframe II adapters. */
static char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
        {"rmac_ttl_1519_4095_frms"},
        {"rmac_ttl_4096_8191_frms"},
        {"rmac_ttl_8192_max_frms"},
        {"rmac_ttl_gt_max_frms"},
        {"rmac_osized_alt_frms"},
        {"rmac_jabber_alt_frms"},
        {"rmac_gt_max_alt_frms"},
        {"rmac_vlan_frms"},
        {"rmac_len_discard"},
        {"rmac_fcs_discard"},
        {"rmac_pf_discard"},
        {"rmac_da_discard"},
        {"rmac_red_discard"},
        {"rmac_rts_discard"},
        {"rmac_ingm_full_discard"},
        {"link_fault_cnt"}
};

/* Software counters maintained by the driver itself (not hardware regs). */
static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
        {"\n DRIVER STATISTICS"},
        {"single_bit_ecc_errs"},
        {"double_bit_ecc_errs"},
        {"parity_err_cnt"},
        {"serious_err_cnt"},
        {"soft_reset_cnt"},
        {"fifo_full_cnt"},
        {"ring_0_full_cnt"},
        {"ring_1_full_cnt"},
        {"ring_2_full_cnt"},
        {"ring_3_full_cnt"},
        {"ring_4_full_cnt"},
        {"ring_5_full_cnt"},
        {"ring_6_full_cnt"},
        {"ring_7_full_cnt"},
        {"alarm_transceiver_temp_high"},
        {"alarm_transceiver_temp_low"},
        {"alarm_laser_bias_current_high"},
        {"alarm_laser_bias_current_low"},
        {"alarm_laser_output_power_high"},
        {"alarm_laser_output_power_low"},
        {"warn_transceiver_temp_high"},
        {"warn_transceiver_temp_low"},
        {"warn_laser_bias_current_high"},
        {"warn_laser_bias_current_low"},
        {"warn_laser_output_power_high"},
        {"warn_laser_output_power_low"},
        {"lro_aggregated_pkts"},
        {"lro_flush_both_count"},
        {"lro_out_of_sequence_pkts"},
        {"lro_flush_due_to_max_pkts"},
        {"lro_avg_aggr_pkts"},
        {"mem_alloc_fail_cnt"},
        {"pci_map_fail_cnt"},
        {"watchdog_timer_cnt"},
        {"mem_allocated"},
        {"mem_freed"},
        {"link_up_cnt"},
        {"link_down_cnt"},
        {"link_up_time"},
        {"link_down_time"},
        {"tx_tcode_buf_abort_cnt"},
        {"tx_tcode_desc_abort_cnt"},
        {"tx_tcode_parity_err_cnt"},
        {"tx_tcode_link_loss_cnt"},
        {"tx_tcode_list_proc_err_cnt"},
        {"rx_tcode_parity_err_cnt"},
        {"rx_tcode_abort_cnt"},
        {"rx_tcode_parity_abort_cnt"},
        {"rx_tcode_rda_fail_cnt"},
        {"rx_tcode_unkn_prot_cnt"},
        {"rx_tcode_fcs_err_cnt"},
        {"rx_tcode_buf_size_err_cnt"},
        {"rx_tcode_rxd_corrupt_cnt"},
        {"rx_tcode_unkn_err_cnt"},
        {"tda_err_cnt"},
        {"pfc_err_cnt"},
        {"pcc_err_cnt"},
        {"tti_err_cnt"},
        {"tpa_err_cnt"},
        {"sm_err_cnt"},
        {"lso_err_cnt"},
        {"mac_tmac_err_cnt"},
        {"mac_rmac_err_cnt"},
        {"xgxs_txgxs_err_cnt"},
        {"xgxs_rxgxs_err_cnt"},
        {"rc_err_cnt"},
        {"prc_pcix_err_cnt"},
        {"rpa_err_cnt"},
        {"rda_err_cnt"},
        {"rti_err_cnt"},
        {"mc_err_cnt"}
};
325
/* Entry counts for each of the ethtool key tables above. */
#define S2IO_XENA_STAT_LEN      ARRAY_SIZE(ethtool_xena_stats_keys)
#define S2IO_ENHANCED_STAT_LEN  ARRAY_SIZE(ethtool_enhanced_stats_keys)
#define S2IO_DRIVER_STAT_LEN    ARRAY_SIZE(ethtool_driver_stats_keys)

/* Xframe I exports base + driver stats; Xframe II adds the enhanced set. */
#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN )
#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN )

/* Total byte sizes of the stat-string blocks handed to ethtool. */
#define XFRAME_I_STAT_STRINGS_LEN ( XFRAME_I_STAT_LEN * ETH_GSTRING_LEN )
#define XFRAME_II_STAT_STRINGS_LEN ( XFRAME_II_STAT_LEN * ETH_GSTRING_LEN )

#define S2IO_TEST_LEN   ARRAY_SIZE(s2io_gstrings)
#define S2IO_STRINGS_LEN        S2IO_TEST_LEN * ETH_GSTRING_LEN
338
/*
 * S2IO_TIMER_CONF - initialize a kernel timer and arm it.
 * @timer:  struct timer_list lvalue to set up
 * @handle: callback invoked on expiry
 * @arg:    value stored in timer.data, passed to the callback
 * @exp:    relative expiry in jiffies
 *
 * Wrapped in do { } while (0) so the macro behaves as a single statement
 * (safe inside an unbraced if/else); arguments are parenthesized against
 * expression-argument expansion surprises.
 */
#define S2IO_TIMER_CONF(timer, handle, arg, exp)                \
        do {                                                    \
                init_timer(&(timer));                           \
                (timer).function = (handle);                    \
                (timer).data = (unsigned long)(arg);            \
                mod_timer(&(timer), (jiffies + (exp)));         \
        } while (0)
344
345 /* copy mac addr to def_mac_addr array */
346 static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
347 {
348         sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
349         sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
350         sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
351         sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
352         sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
353         sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
354 }
/*
 * s2io_vlan_rx_register - attach a vlan_group to the device (8021q hook).
 * @dev: network device
 * @grp: vlan group supplied by the vlan layer (NULL when unregistering)
 *
 * Every Tx fifo lock is taken (irq-disabling, one saved flag word per
 * fifo) so no transmit path observes a half-updated nic->vlgrp; locks
 * are released in reverse acquisition order.
 */
static void s2io_vlan_rx_register(struct net_device *dev,
                                        struct vlan_group *grp)
{
        int i;
        struct s2io_nic *nic = dev->priv;
        unsigned long flags[MAX_TX_FIFOS];
        struct mac_info *mac_control = &nic->mac_control;
        struct config_param *config = &nic->config;

        /* Quiesce all Tx fifos before switching the group pointer. */
        for (i = 0; i < config->tx_fifo_num; i++)
                spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags[i]);

        nic->vlgrp = grp;
        /* Release in reverse order of acquisition. */
        for (i = config->tx_fifo_num - 1; i >= 0; i--)
                spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock,
                                flags[i]);
}
373
/* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not */
static int vlan_strip_flag;

/*
 * s2io_vlan_rx_kill_vid - drop a vlan id from the group (8021q hook).
 * @dev: network device
 * @vid: vlan id being removed
 *
 * Same locking discipline as s2io_vlan_rx_register(): all Tx fifo locks
 * are held while the group entry is cleared, released in reverse order.
 */
static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
{
        int i;
        struct s2io_nic *nic = dev->priv;
        unsigned long flags[MAX_TX_FIFOS];
        struct mac_info *mac_control = &nic->mac_control;
        struct config_param *config = &nic->config;

        for (i = 0; i < config->tx_fifo_num; i++)
                spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags[i]);

        /* vlgrp may be NULL if no group was ever registered. */
        if (nic->vlgrp)
                vlan_group_set_device(nic->vlgrp, vid, NULL);

        for (i = config->tx_fifo_num - 1; i >= 0; i--)
                spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock,
                        flags[i]);
}
396
/*
 * Constants to be programmed into the Xena's registers, to configure
 * the XAUI.
 */

/* Terminator for the register-programming tables below. */
#define END_SIGN        0x0

/* DTX configuration sequence for Xframe II (Hercules) adapters:
 * alternating "set address" / "write data" register values, END_SIGN
 * terminated. */
static const u64 herc_act_dtx_cfg[] = {
        /* Set address */
        0x8000051536750000ULL, 0x80000515367500E0ULL,
        /* Write data */
        0x8000051536750004ULL, 0x80000515367500E4ULL,
        /* Set address */
        0x80010515003F0000ULL, 0x80010515003F00E0ULL,
        /* Write data */
        0x80010515003F0004ULL, 0x80010515003F00E4ULL,
        /* Set address */
        0x801205150D440000ULL, 0x801205150D4400E0ULL,
        /* Write data */
        0x801205150D440004ULL, 0x801205150D4400E4ULL,
        /* Set address */
        0x80020515F2100000ULL, 0x80020515F21000E0ULL,
        /* Write data */
        0x80020515F2100004ULL, 0x80020515F21000E4ULL,
        /* Done */
        END_SIGN
};

/* DTX configuration sequence for Xframe I (Xena) adapters. */
static const u64 xena_dtx_cfg[] = {
        /* Set address */
        0x8000051500000000ULL, 0x80000515000000E0ULL,
        /* Write data */
        0x80000515D9350004ULL, 0x80000515D93500E4ULL,
        /* Set address */
        0x8001051500000000ULL, 0x80010515000000E0ULL,
        /* Write data */
        0x80010515001E0004ULL, 0x80010515001E00E4ULL,
        /* Set address */
        0x8002051500000000ULL, 0x80020515000000E0ULL,
        /* Write data */
        0x80020515F2100004ULL, 0x80020515F21000E4ULL,
        END_SIGN
};

/*
 * Constants for Fixing the MacAddress problem seen mostly on
 * Alpha machines.
 */
static const u64 fix_mac[] = {
        0x0060000000000000ULL, 0x0060600000000000ULL,
        0x0040600000000000ULL, 0x0000600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0000600000000000ULL,
        0x0040600000000000ULL, 0x0060600000000000ULL,
        END_SIGN
};
461
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


/* Module Loadable parameters. */
S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
S2IO_PARM_INT(rx_ring_num, 1);
S2IO_PARM_INT(multiq, 0);               /* MULTIQUEUE support, off by default */
S2IO_PARM_INT(rx_ring_mode, 1);         /* 1 or 2 buffer mode for Rx rings */
S2IO_PARM_INT(use_continuous_tx_intrs, 1);
S2IO_PARM_INT(rmac_pause_time, 0x100);
S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
S2IO_PARM_INT(shared_splits, 0);
S2IO_PARM_INT(tmac_util_period, 5);
S2IO_PARM_INT(rmac_util_period, 5);
S2IO_PARM_INT(l3l4hdr_size, 128);
/* 0 is no steering, 1 is Priority steering, 2 is Default steering */
S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
/* Frequency of Rx desc syncs expressed as power of 2 */
S2IO_PARM_INT(rxsync_frequency, 3);
/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
S2IO_PARM_INT(intr_type, 2);
/* Large receive offload feature */
static unsigned int lro_enable;
module_param_named(lro, lro_enable, uint, 0);

/* Max pkts to be aggregated by LRO at one time. If not specified,
 * aggregation happens until we hit max IP pkt size(64K)
 */
S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
S2IO_PARM_INT(indicate_max_pkts, 0);

S2IO_PARM_INT(napi, 1);                 /* NAPI polling: on by default */
S2IO_PARM_INT(ufo, 0);                  /* UDP Fragmentation Offload: off */
S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);

/* Per-fifo Tx lengths and per-ring Rx sizes; overridable as arrays. */
static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
    {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
static unsigned int rts_frm_len[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = 0 };

module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);
509
/*
 * S2IO device table.
 * This table lists all the devices that this driver supports.
 * Matches the Win/Uni variants of both the Xena and Hercules chips.
 */
static struct pci_device_id s2io_tbl[] __devinitdata = {
        {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
         PCI_ANY_ID, PCI_ANY_ID},
        {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
         PCI_ANY_ID, PCI_ANY_ID},
        {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
         PCI_ANY_ID, PCI_ANY_ID},
        {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
         PCI_ANY_ID, PCI_ANY_ID},
        {0,}
};

MODULE_DEVICE_TABLE(pci, s2io_tbl);
527
/* PCI error-recovery callbacks (handlers are defined later in this file). */
static struct pci_error_handlers s2io_err_handler = {
        .error_detected = s2io_io_error_detected,
        .slot_reset = s2io_io_slot_reset,
        .resume = s2io_io_resume,
};

/* PCI driver registration; probe/remove are defined later in this file. */
static struct pci_driver s2io_driver = {
      .name = "S2IO",
      .id_table = s2io_tbl,
      .probe = s2io_init_nic,
      .remove = __devexit_p(s2io_rem_nic),
      .err_handler = &s2io_err_handler,
};
541
/* A simplifier macro used both by init and free shared_mem Fns().
 * Ceiling division: number of pages needed to hold @len entries at
 * @per_each entries per page.  Arguments are fully parenthesized so
 * expression arguments (e.g. TXD_MEM_PAGE_CNT(n, a + b)) expand
 * correctly. */
#define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
544
/* netqueue manipulation helper functions */

/*
 * s2io_stop_all_tx_queue - halt transmission on every Tx fifo.
 * With multiqueue each hardware fifo maps to its own netdev subqueue;
 * otherwise the per-fifo software state is marked FIFO_QUEUE_STOP and
 * the single netdev queue is stopped.
 */
static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
{
        int i;
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
        if (sp->config.multiq) {
                for (i = 0; i < sp->config.tx_fifo_num; i++)
                        netif_stop_subqueue(sp->dev, i);
        } else
#endif
        {
                for (i = 0; i < sp->config.tx_fifo_num; i++)
                        sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
                netif_stop_queue(sp->dev);
        }
}
561
/*
 * s2io_stop_tx_queue - halt transmission on a single Tx fifo.
 * In non-multiqueue mode the whole device queue is stopped; the
 * per-fifo queue_state records which fifo caused the stop.
 */
static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
        if (sp->config.multiq)
                netif_stop_subqueue(sp->dev, fifo_no);
        else
#endif
        {
                sp->mac_control.fifos[fifo_no].queue_state =
                        FIFO_QUEUE_STOP;
                netif_stop_queue(sp->dev);
        }
}
575
/*
 * s2io_start_all_tx_queue - (re)enable transmission on every Tx fifo.
 * Mirror of s2io_stop_all_tx_queue(): marks each fifo FIFO_QUEUE_START
 * and starts the netdev queue(s).
 */
static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
{
        int i;
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
        if (sp->config.multiq) {
                for (i = 0; i < sp->config.tx_fifo_num; i++)
                        netif_start_subqueue(sp->dev, i);
        } else
#endif
        {
                for (i = 0; i < sp->config.tx_fifo_num; i++)
                        sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
                netif_start_queue(sp->dev);
        }
}
591
/*
 * s2io_start_tx_queue - (re)enable transmission on a single Tx fifo.
 * Counterpart of s2io_stop_tx_queue().
 */
static inline void s2io_start_tx_queue(struct s2io_nic *sp, int fifo_no)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
        if (sp->config.multiq)
                netif_start_subqueue(sp->dev, fifo_no);
        else
#endif
        {
                sp->mac_control.fifos[fifo_no].queue_state =
                        FIFO_QUEUE_START;
                netif_start_queue(sp->dev);
        }
}
605
/*
 * s2io_wake_all_tx_queue - wake every Tx queue after a stop.
 * Unlike "start", wake also reschedules the Tx softirq if packets are
 * already queued.
 */
static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
{
        int i;
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
        if (sp->config.multiq) {
                for (i = 0; i < sp->config.tx_fifo_num; i++)
                        netif_wake_subqueue(sp->dev, i);
        } else
#endif
        {
                for (i = 0; i < sp->config.tx_fifo_num; i++)
                        sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
                netif_wake_queue(sp->dev);
        }
}
621
/*
 * s2io_wake_tx_queue - conditionally wake one fifo's queue from the Tx
 * completion path.
 * @fifo:   fifo whose descriptors were just reclaimed
 * @cnt:    number of descriptors freed; 0 means nothing to wake for
 * @multiq: non-zero when per-fifo subqueues are in use
 *
 * Only wakes when descriptors were actually freed and the queue had
 * been stopped, avoiding needless softirq rescheduling.
 */
static inline void s2io_wake_tx_queue(
        struct fifo_info *fifo, int cnt, u8 multiq)
{

#ifdef CONFIG_NETDEVICES_MULTIQUEUE
        if (multiq) {
                if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
                        netif_wake_subqueue(fifo->dev, fifo->fifo_no);
        } else
#endif
        /* Non-multiq: check the software state first, then the netdev. */
        if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
                if (netif_queue_stopped(fifo->dev)) {
                        fifo->queue_state = FIFO_QUEUE_START;
                        netif_wake_queue(fifo->dev);
                }
        }
}
639
640 /**
641  * init_shared_mem - Allocation and Initialization of Memory
642  * @nic: Device private variable.
643  * Description: The function allocates all the memory areas shared
644  * between the NIC and the driver. This includes Tx descriptors,
645  * Rx descriptors and the statistics block.
646  */
647
648 static int init_shared_mem(struct s2io_nic *nic)
649 {
650         u32 size;
651         void *tmp_v_addr, *tmp_v_addr_next;
652         dma_addr_t tmp_p_addr, tmp_p_addr_next;
653         struct RxD_block *pre_rxd_blk = NULL;
654         int i, j, blk_cnt;
655         int lst_size, lst_per_page;
656         struct net_device *dev = nic->dev;
657         unsigned long tmp;
658         struct buffAdd *ba;
659
660         struct mac_info *mac_control;
661         struct config_param *config;
662         unsigned long long mem_allocated = 0;
663
664         mac_control = &nic->mac_control;
665         config = &nic->config;
666
667
668         /* Allocation and initialization of TXDLs in FIOFs */
669         size = 0;
670         for (i = 0; i < config->tx_fifo_num; i++) {
671                 size += config->tx_cfg[i].fifo_len;
672         }
673         if (size > MAX_AVAILABLE_TXDS) {
674                 DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
675                 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
676                 return -EINVAL;
677         }
678
679         size = 0;
680         for (i = 0; i < config->tx_fifo_num; i++) {
681                 size = config->tx_cfg[i].fifo_len;
682                 /*
683                  * Legal values are from 2 to 8192
684                  */
685                 if (size < 2) {
686                         DBG_PRINT(ERR_DBG, "s2io: Invalid fifo len (%d)", size);
687                         DBG_PRINT(ERR_DBG, "for fifo %d\n", i);
688                         DBG_PRINT(ERR_DBG, "s2io: Legal values for fifo len"
689                                 "are 2 to 8192\n");
690                         return -EINVAL;
691                 }
692         }
693
694         lst_size = (sizeof(struct TxD) * config->max_txds);
695         lst_per_page = PAGE_SIZE / lst_size;
696
697         for (i = 0; i < config->tx_fifo_num; i++) {
698                 int fifo_len = config->tx_cfg[i].fifo_len;
699                 int list_holder_size = fifo_len * sizeof(struct list_info_hold);
700                 mac_control->fifos[i].list_info = kzalloc(list_holder_size,
701                                                           GFP_KERNEL);
702                 if (!mac_control->fifos[i].list_info) {
703                         DBG_PRINT(INFO_DBG,
704                                   "Malloc failed for list_info\n");
705                         return -ENOMEM;
706                 }
707                 mem_allocated += list_holder_size;
708         }
709         for (i = 0; i < config->tx_fifo_num; i++) {
710                 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
711                                                 lst_per_page);
712                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
713                 mac_control->fifos[i].tx_curr_put_info.fifo_len =
714                     config->tx_cfg[i].fifo_len - 1;
715                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
716                 mac_control->fifos[i].tx_curr_get_info.fifo_len =
717                     config->tx_cfg[i].fifo_len - 1;
718                 mac_control->fifos[i].fifo_no = i;
719                 mac_control->fifos[i].nic = nic;
720                 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
721                 mac_control->fifos[i].dev = dev;
722
723                 for (j = 0; j < page_num; j++) {
724                         int k = 0;
725                         dma_addr_t tmp_p;
726                         void *tmp_v;
727                         tmp_v = pci_alloc_consistent(nic->pdev,
728                                                      PAGE_SIZE, &tmp_p);
729                         if (!tmp_v) {
730                                 DBG_PRINT(INFO_DBG,
731                                           "pci_alloc_consistent ");
732                                 DBG_PRINT(INFO_DBG, "failed for TxDL\n");
733                                 return -ENOMEM;
734                         }
735                         /* If we got a zero DMA address(can happen on
736                          * certain platforms like PPC), reallocate.
737                          * Store virtual address of page we don't want,
738                          * to be freed later.
739                          */
740                         if (!tmp_p) {
741                                 mac_control->zerodma_virt_addr = tmp_v;
742                                 DBG_PRINT(INIT_DBG,
743                                 "%s: Zero DMA address for TxDL. ", dev->name);
744                                 DBG_PRINT(INIT_DBG,
745                                 "Virtual address %p\n", tmp_v);
746                                 tmp_v = pci_alloc_consistent(nic->pdev,
747                                                      PAGE_SIZE, &tmp_p);
748                                 if (!tmp_v) {
749                                         DBG_PRINT(INFO_DBG,
750                                           "pci_alloc_consistent ");
751                                         DBG_PRINT(INFO_DBG, "failed for TxDL\n");
752                                         return -ENOMEM;
753                                 }
754                                 mem_allocated += PAGE_SIZE;
755                         }
756                         while (k < lst_per_page) {
757                                 int l = (j * lst_per_page) + k;
758                                 if (l == config->tx_cfg[i].fifo_len)
759                                         break;
760                                 mac_control->fifos[i].list_info[l].list_virt_addr =
761                                     tmp_v + (k * lst_size);
762                                 mac_control->fifos[i].list_info[l].list_phy_addr =
763                                     tmp_p + (k * lst_size);
764                                 k++;
765                         }
766                 }
767         }
768
769         for (i = 0; i < config->tx_fifo_num; i++) {
770                 size = config->tx_cfg[i].fifo_len;
771                 mac_control->fifos[i].ufo_in_band_v
772                         = kcalloc(size, sizeof(u64), GFP_KERNEL);
773                 if (!mac_control->fifos[i].ufo_in_band_v)
774                         return -ENOMEM;
775                 mem_allocated += (size * sizeof(u64));
776         }
777
778         /* Allocation and initialization of RXDs in Rings */
779         size = 0;
780         for (i = 0; i < config->rx_ring_num; i++) {
781                 if (config->rx_cfg[i].num_rxd %
782                     (rxd_count[nic->rxd_mode] + 1)) {
783                         DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
784                         DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
785                                   i);
786                         DBG_PRINT(ERR_DBG, "RxDs per Block");
787                         return FAILURE;
788                 }
789                 size += config->rx_cfg[i].num_rxd;
790                 mac_control->rings[i].block_count =
791                         config->rx_cfg[i].num_rxd /
792                         (rxd_count[nic->rxd_mode] + 1 );
793                 mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
794                         mac_control->rings[i].block_count;
795         }
796         if (nic->rxd_mode == RXD_MODE_1)
797                 size = (size * (sizeof(struct RxD1)));
798         else
799                 size = (size * (sizeof(struct RxD3)));
800
801         for (i = 0; i < config->rx_ring_num; i++) {
802                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
803                 mac_control->rings[i].rx_curr_get_info.offset = 0;
804                 mac_control->rings[i].rx_curr_get_info.ring_len =
805                     config->rx_cfg[i].num_rxd - 1;
806                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
807                 mac_control->rings[i].rx_curr_put_info.offset = 0;
808                 mac_control->rings[i].rx_curr_put_info.ring_len =
809                     config->rx_cfg[i].num_rxd - 1;
810                 mac_control->rings[i].nic = nic;
811                 mac_control->rings[i].ring_no = i;
812                 mac_control->rings[i].lro = lro_enable;
813
814                 blk_cnt = config->rx_cfg[i].num_rxd /
815                                 (rxd_count[nic->rxd_mode] + 1);
816                 /*  Allocating all the Rx blocks */
817                 for (j = 0; j < blk_cnt; j++) {
818                         struct rx_block_info *rx_blocks;
819                         int l;
820
821                         rx_blocks = &mac_control->rings[i].rx_blocks[j];
822                         size = SIZE_OF_BLOCK; //size is always page size
823                         tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
824                                                           &tmp_p_addr);
825                         if (tmp_v_addr == NULL) {
826                                 /*
827                                  * In case of failure, free_shared_mem()
828                                  * is called, which should free any
829                                  * memory that was alloced till the
830                                  * failure happened.
831                                  */
832                                 rx_blocks->block_virt_addr = tmp_v_addr;
833                                 return -ENOMEM;
834                         }
835                         mem_allocated += size;
836                         memset(tmp_v_addr, 0, size);
837                         rx_blocks->block_virt_addr = tmp_v_addr;
838                         rx_blocks->block_dma_addr = tmp_p_addr;
839                         rx_blocks->rxds = kmalloc(sizeof(struct rxd_info)*
840                                                   rxd_count[nic->rxd_mode],
841                                                   GFP_KERNEL);
842                         if (!rx_blocks->rxds)
843                                 return -ENOMEM;
844                         mem_allocated +=
845                         (sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
846                         for (l=0; l<rxd_count[nic->rxd_mode];l++) {
847                                 rx_blocks->rxds[l].virt_addr =
848                                         rx_blocks->block_virt_addr +
849                                         (rxd_size[nic->rxd_mode] * l);
850                                 rx_blocks->rxds[l].dma_addr =
851                                         rx_blocks->block_dma_addr +
852                                         (rxd_size[nic->rxd_mode] * l);
853                         }
854                 }
855                 /* Interlinking all Rx Blocks */
856                 for (j = 0; j < blk_cnt; j++) {
857                         tmp_v_addr =
858                                 mac_control->rings[i].rx_blocks[j].block_virt_addr;
859                         tmp_v_addr_next =
860                                 mac_control->rings[i].rx_blocks[(j + 1) %
861                                               blk_cnt].block_virt_addr;
862                         tmp_p_addr =
863                                 mac_control->rings[i].rx_blocks[j].block_dma_addr;
864                         tmp_p_addr_next =
865                                 mac_control->rings[i].rx_blocks[(j + 1) %
866                                               blk_cnt].block_dma_addr;
867
868                         pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
869                         pre_rxd_blk->reserved_2_pNext_RxD_block =
870                             (unsigned long) tmp_v_addr_next;
871                         pre_rxd_blk->pNext_RxD_Blk_physical =
872                             (u64) tmp_p_addr_next;
873                 }
874         }
875         if (nic->rxd_mode == RXD_MODE_3B) {
876                 /*
877                  * Allocation of Storages for buffer addresses in 2BUFF mode
878                  * and the buffers as well.
879                  */
880                 for (i = 0; i < config->rx_ring_num; i++) {
881                         blk_cnt = config->rx_cfg[i].num_rxd /
882                            (rxd_count[nic->rxd_mode]+ 1);
883                         mac_control->rings[i].ba =
884                                 kmalloc((sizeof(struct buffAdd *) * blk_cnt),
885                                      GFP_KERNEL);
886                         if (!mac_control->rings[i].ba)
887                                 return -ENOMEM;
888                         mem_allocated +=(sizeof(struct buffAdd *) * blk_cnt);
889                         for (j = 0; j < blk_cnt; j++) {
890                                 int k = 0;
891                                 mac_control->rings[i].ba[j] =
892                                         kmalloc((sizeof(struct buffAdd) *
893                                                 (rxd_count[nic->rxd_mode] + 1)),
894                                                 GFP_KERNEL);
895                                 if (!mac_control->rings[i].ba[j])
896                                         return -ENOMEM;
897                                 mem_allocated += (sizeof(struct buffAdd) *  \
898                                         (rxd_count[nic->rxd_mode] + 1));
899                                 while (k != rxd_count[nic->rxd_mode]) {
900                                         ba = &mac_control->rings[i].ba[j][k];
901
902                                         ba->ba_0_org = (void *) kmalloc
903                                             (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
904                                         if (!ba->ba_0_org)
905                                                 return -ENOMEM;
906                                         mem_allocated +=
907                                                 (BUF0_LEN + ALIGN_SIZE);
908                                         tmp = (unsigned long)ba->ba_0_org;
909                                         tmp += ALIGN_SIZE;
910                                         tmp &= ~((unsigned long) ALIGN_SIZE);
911                                         ba->ba_0 = (void *) tmp;
912
913                                         ba->ba_1_org = (void *) kmalloc
914                                             (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
915                                         if (!ba->ba_1_org)
916                                                 return -ENOMEM;
917                                         mem_allocated
918                                                 += (BUF1_LEN + ALIGN_SIZE);
919                                         tmp = (unsigned long) ba->ba_1_org;
920                                         tmp += ALIGN_SIZE;
921                                         tmp &= ~((unsigned long) ALIGN_SIZE);
922                                         ba->ba_1 = (void *) tmp;
923                                         k++;
924                                 }
925                         }
926                 }
927         }
928
929         /* Allocation and initialization of Statistics block */
930         size = sizeof(struct stat_block);
931         mac_control->stats_mem = pci_alloc_consistent
932             (nic->pdev, size, &mac_control->stats_mem_phy);
933
934         if (!mac_control->stats_mem) {
935                 /*
936                  * In case of failure, free_shared_mem() is called, which
937                  * should free any memory that was alloced till the
938                  * failure happened.
939                  */
940                 return -ENOMEM;
941         }
942         mem_allocated += size;
943         mac_control->stats_mem_sz = size;
944
945         tmp_v_addr = mac_control->stats_mem;
946         mac_control->stats_info = (struct stat_block *) tmp_v_addr;
947         memset(tmp_v_addr, 0, size);
948         DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
949                   (unsigned long long) tmp_p_addr);
950         mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
951         return SUCCESS;
952 }
953
954 /**
955  * free_shared_mem - Free the allocated Memory
956  * @nic:  Device private variable.
957  * Description: This function is to free all memory locations allocated by
958  * the init_shared_mem() function and return it to the kernel.
959  */
960
static void free_shared_mem(struct s2io_nic *nic)
{
        int i, j, blk_cnt, size;
        void *tmp_v_addr;
        dma_addr_t tmp_p_addr;
        struct mac_info *mac_control;
        struct config_param *config;
        int lst_size, lst_per_page;
        struct net_device *dev;
        int page_num = 0;

        if (!nic)
                return;

        dev = nic->dev;

        mac_control = &nic->mac_control;
        config = &nic->config;

        /* Recompute the TxDL page layout exactly as the allocator did. */
        lst_size = (sizeof(struct TxD) * config->max_txds);
        lst_per_page = PAGE_SIZE / lst_size;

        /* Free the per-fifo TxDL pages. */
        for (i = 0; i < config->tx_fifo_num; i++) {
                page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
                                                        lst_per_page);
                for (j = 0; j < page_num; j++) {
                        int mem_blks = (j * lst_per_page);
                        /*
                         * NOTE(review): no list_info at all -- assume the
                         * allocation never got this far and bail out.
                         */
                        if (!mac_control->fifos[i].list_info)
                                return;
                        /* Pages were filled in order; first NULL is the end. */
                        if (!mac_control->fifos[i].list_info[mem_blks].
                                 list_virt_addr)
                                break;
                        pci_free_consistent(nic->pdev, PAGE_SIZE,
                                            mac_control->fifos[i].
                                            list_info[mem_blks].
                                            list_virt_addr,
                                            mac_control->fifos[i].
                                            list_info[mem_blks].
                                            list_phy_addr);
                        /* Mirror the accounting done at allocation time. */
                        nic->mac_control.stats_info->sw_stat.mem_freed
                                                += PAGE_SIZE;
                }
                /* If we got a zero DMA address during allocation,
                 * free the page now
                 */
                if (mac_control->zerodma_virt_addr) {
                        pci_free_consistent(nic->pdev, PAGE_SIZE,
                                            mac_control->zerodma_virt_addr,
                                            (dma_addr_t)0);
                        DBG_PRINT(INIT_DBG,
                                "%s: Freeing TxDL with zero DMA addr. ",
                                dev->name);
                        DBG_PRINT(INIT_DBG, "Virtual address %p\n",
                                mac_control->zerodma_virt_addr);
                        nic->mac_control.stats_info->sw_stat.mem_freed
                                                += PAGE_SIZE;
                }
                kfree(mac_control->fifos[i].list_info);
                nic->mac_control.stats_info->sw_stat.mem_freed +=
                (nic->config.tx_cfg[i].fifo_len *sizeof(struct list_info_hold));
        }

        /* Free each ring's RxD blocks and their per-RxD lookup arrays. */
        size = SIZE_OF_BLOCK;
        for (i = 0; i < config->rx_ring_num; i++) {
                blk_cnt = mac_control->rings[i].block_count;
                for (j = 0; j < blk_cnt; j++) {
                        tmp_v_addr = mac_control->rings[i].rx_blocks[j].
                                block_virt_addr;
                        tmp_p_addr = mac_control->rings[i].rx_blocks[j].
                                block_dma_addr;
                        /* Blocks were allocated in order; NULL marks the end. */
                        if (tmp_v_addr == NULL)
                                break;
                        pci_free_consistent(nic->pdev, size,
                                            tmp_v_addr, tmp_p_addr);
                        nic->mac_control.stats_info->sw_stat.mem_freed += size;
                        kfree(mac_control->rings[i].rx_blocks[j].rxds);
                        nic->mac_control.stats_info->sw_stat.mem_freed +=
                        ( sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
                }
        }

        if (nic->rxd_mode == RXD_MODE_3B) {
                /* Freeing buffer storage addresses in 2BUFF mode. */
                for (i = 0; i < config->rx_ring_num; i++) {
                        blk_cnt = config->rx_cfg[i].num_rxd /
                            (rxd_count[nic->rxd_mode] + 1);
                        for (j = 0; j < blk_cnt; j++) {
                                int k = 0;
                                if (!mac_control->rings[i].ba[j])
                                        continue;
                                /* Free both aligned buffers of every RxD. */
                                while (k != rxd_count[nic->rxd_mode]) {
                                        struct buffAdd *ba =
                                                &mac_control->rings[i].ba[j][k];
                                        kfree(ba->ba_0_org);
                                        nic->mac_control.stats_info->sw_stat.\
                                        mem_freed += (BUF0_LEN + ALIGN_SIZE);
                                        kfree(ba->ba_1_org);
                                        nic->mac_control.stats_info->sw_stat.\
                                        mem_freed += (BUF1_LEN + ALIGN_SIZE);
                                        k++;
                                }
                                kfree(mac_control->rings[i].ba[j]);
                                nic->mac_control.stats_info->sw_stat.mem_freed +=
                                        (sizeof(struct buffAdd) *
                                        (rxd_count[nic->rxd_mode] + 1));
                        }
                        kfree(mac_control->rings[i].ba);
                        nic->mac_control.stats_info->sw_stat.mem_freed +=
                        (sizeof(struct buffAdd *) * blk_cnt);
                }
        }

        /* Per-fifo UFO in-band buffers (allocated in init_shared_mem()). */
        for (i = 0; i < nic->config.tx_fifo_num; i++) {
                if (mac_control->fifos[i].ufo_in_band_v) {
                        nic->mac_control.stats_info->sw_stat.mem_freed
                                += (config->tx_cfg[i].fifo_len * sizeof(u64));
                        kfree(mac_control->fifos[i].ufo_in_band_v);
                }
        }

        /* DMA-coherent statistics block goes last -- sw_stat lives inside it. */
        if (mac_control->stats_mem) {
                nic->mac_control.stats_info->sw_stat.mem_freed +=
                        mac_control->stats_mem_sz;
                pci_free_consistent(nic->pdev,
                                    mac_control->stats_mem_sz,
                                    mac_control->stats_mem,
                                    mac_control->stats_mem_phy);
        }
}
1090
1091 /**
1092  * s2io_verify_pci_mode -
1093  */
1094
1095 static int s2io_verify_pci_mode(struct s2io_nic *nic)
1096 {
1097         struct XENA_dev_config __iomem *bar0 = nic->bar0;
1098         register u64 val64 = 0;
1099         int     mode;
1100
1101         val64 = readq(&bar0->pci_mode);
1102         mode = (u8)GET_PCI_MODE(val64);
1103
1104         if ( val64 & PCI_MODE_UNKNOWN_MODE)
1105                 return -1;      /* Unknown PCI mode */
1106         return mode;
1107 }
1108
1109 #define NEC_VENID   0x1033
1110 #define NEC_DEVID   0x0125
1111 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
1112 {
1113         struct pci_dev *tdev = NULL;
1114         while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
1115                 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
1116                         if (tdev->bus == s2io_pdev->bus->parent)
1117                                 pci_dev_put(tdev);
1118                                 return 1;
1119                 }
1120         }
1121         return 0;
1122 }
1123
/* Bus clock in MHz, indexed by the mode value read from the pci_mode register. */
static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
1125 /**
1126  * s2io_print_pci_mode -
1127  */
1128 static int s2io_print_pci_mode(struct s2io_nic *nic)
1129 {
1130         struct XENA_dev_config __iomem *bar0 = nic->bar0;
1131         register u64 val64 = 0;
1132         int     mode;
1133         struct config_param *config = &nic->config;
1134
1135         val64 = readq(&bar0->pci_mode);
1136         mode = (u8)GET_PCI_MODE(val64);
1137
1138         if ( val64 & PCI_MODE_UNKNOWN_MODE)
1139                 return -1;      /* Unknown PCI mode */
1140
1141         config->bus_speed = bus_speed[mode];
1142
1143         if (s2io_on_nec_bridge(nic->pdev)) {
1144                 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
1145                                                         nic->dev->name);
1146                 return mode;
1147         }
1148
1149         if (val64 & PCI_MODE_32_BITS) {
1150                 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
1151         } else {
1152                 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
1153         }
1154
1155         switch(mode) {
1156                 case PCI_MODE_PCI_33:
1157                         DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
1158                         break;
1159                 case PCI_MODE_PCI_66:
1160                         DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
1161                         break;
1162                 case PCI_MODE_PCIX_M1_66:
1163                         DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
1164                         break;
1165                 case PCI_MODE_PCIX_M1_100:
1166                         DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
1167                         break;
1168                 case PCI_MODE_PCIX_M1_133:
1169                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
1170                         break;
1171                 case PCI_MODE_PCIX_M2_66:
1172                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
1173                         break;
1174                 case PCI_MODE_PCIX_M2_100:
1175                         DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
1176                         break;
1177                 case PCI_MODE_PCIX_M2_133:
1178                         DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
1179                         break;
1180                 default:
1181                         return -1;      /* Unsupported bus speed */
1182         }
1183
1184         return mode;
1185 }
1186
1187 /**
1188  *  init_tti - Initialization transmit traffic interrupt scheme
1189  *  @nic: device private variable
1190  *  @link: link status (UP/DOWN) used to enable/disable continuous
1191  *  transmit interrupts
1192  *  Description: The function configures transmit traffic interrupts
1193  *  Return Value:  SUCCESS on success and
1194  *  '-1' on failure
1195  */
1196
static int init_tti(struct s2io_nic *nic, int link)
{
        struct XENA_dev_config __iomem *bar0 = nic->bar0;
        register u64 val64 = 0;
        int i;
        struct config_param *config;

        config = &nic->config;

        /* Program one TTI entry per configured Tx fifo. */
        for (i = 0; i < config->tx_fifo_num; i++) {
                /*
                 * TTI Initialization. Default Tx timer gets us about
                 * 250 interrupts per sec. Continuous interrupts are enabled
                 * by default.
                 */
                if (nic->device_type == XFRAME_II_DEVICE) {
                        /* XframeII: scale the timer to the detected bus speed. */
                        int count = (nic->config.bus_speed * 125)/2;
                        val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
                } else
                        val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);

                /* URNG A/B/C thresholds plus auto-compaction of the timer. */
                val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
                                TTI_DATA1_MEM_TX_URNG_B(0x10) |
                                TTI_DATA1_MEM_TX_URNG_C(0x30) |
                                TTI_DATA1_MEM_TX_TIMER_AC_EN;

                /* Continuous interrupts only while the link is up. */
                if (use_continuous_tx_intrs && (link == LINK_UP))
                        val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
                writeq(val64, &bar0->tti_data1_mem);

                /* UFC A-D: utilization-based frame-count thresholds. */
                val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
                                TTI_DATA2_MEM_TX_UFC_B(0x20) |
                                TTI_DATA2_MEM_TX_UFC_C(0x40) |
                                TTI_DATA2_MEM_TX_UFC_D(0x80);

                writeq(val64, &bar0->tti_data2_mem);

                /* Commit the entry for fifo i and wait for the strobe bit
                 * to self-clear; a timeout means the command never took. */
                val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD |
                                TTI_CMD_MEM_OFFSET(i);
                writeq(val64, &bar0->tti_command_mem);

                if (wait_for_cmd_complete(&bar0->tti_command_mem,
                        TTI_CMD_MEM_STROBE_NEW_CMD, S2IO_BIT_RESET) != SUCCESS)
                        return FAILURE;
        }

        return SUCCESS;
}
1245
1246 /**
1247  *  init_nic - Initialization of hardware
1248  *  @nic: device private variable
1249  *  Description: The function sequentially configures every block
1250  *  of the H/W from their reset values.
1251  *  Return Value:  SUCCESS on success and
1252  *  '-1' on failure (endian settings incorrect).
1253  */
1254
1255 static int init_nic(struct s2io_nic *nic)
1256 {
1257         struct XENA_dev_config __iomem *bar0 = nic->bar0;
1258         struct net_device *dev = nic->dev;
1259         register u64 val64 = 0;
1260         void __iomem *add;
1261         u32 time;
1262         int i, j;
1263         struct mac_info *mac_control;
1264         struct config_param *config;
1265         int dtx_cnt = 0;
1266         unsigned long long mem_share;
1267         int mem_size;
1268
1269         mac_control = &nic->mac_control;
1270         config = &nic->config;
1271
1272         /* to set the swapper controle on the card */
1273         if(s2io_set_swapper(nic)) {
1274                 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
1275                 return -EIO;
1276         }
1277
1278         /*
1279          * Herc requires EOI to be removed from reset before XGXS, so..
1280          */
1281         if (nic->device_type & XFRAME_II_DEVICE) {
1282                 val64 = 0xA500000000ULL;
1283                 writeq(val64, &bar0->sw_reset);
1284                 msleep(500);
1285                 val64 = readq(&bar0->sw_reset);
1286         }
1287
1288         /* Remove XGXS from reset state */
1289         val64 = 0;
1290         writeq(val64, &bar0->sw_reset);
1291         msleep(500);
1292         val64 = readq(&bar0->sw_reset);
1293
1294         /* Ensure that it's safe to access registers by checking
1295          * RIC_RUNNING bit is reset. Check is valid only for XframeII.
1296          */
1297         if (nic->device_type == XFRAME_II_DEVICE) {
1298                 for (i = 0; i < 50; i++) {
1299                         val64 = readq(&bar0->adapter_status);
1300                         if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
1301                                 break;
1302                         msleep(10);
1303                 }
1304                 if (i == 50)
1305                         return -ENODEV;
1306         }
1307
1308         /*  Enable Receiving broadcasts */
1309         add = &bar0->mac_cfg;
1310         val64 = readq(&bar0->mac_cfg);
1311         val64 |= MAC_RMAC_BCAST_ENABLE;
1312         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1313         writel((u32) val64, add);
1314         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1315         writel((u32) (val64 >> 32), (add + 4));
1316
1317         /* Read registers in all blocks */
1318         val64 = readq(&bar0->mac_int_mask);
1319         val64 = readq(&bar0->mc_int_mask);
1320         val64 = readq(&bar0->xgxs_int_mask);
1321
1322         /*  Set MTU */
1323         val64 = dev->mtu;
1324         writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1325
1326         if (nic->device_type & XFRAME_II_DEVICE) {
1327                 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1328                         SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1329                                           &bar0->dtx_control, UF);
1330                         if (dtx_cnt & 0x1)
1331                                 msleep(1); /* Necessary!! */
1332                         dtx_cnt++;
1333                 }
1334         } else {
1335                 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1336                         SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1337                                           &bar0->dtx_control, UF);
1338                         val64 = readq(&bar0->dtx_control);
1339                         dtx_cnt++;
1340                 }
1341         }
1342
1343         /*  Tx DMA Initialization */
1344         val64 = 0;
1345         writeq(val64, &bar0->tx_fifo_partition_0);
1346         writeq(val64, &bar0->tx_fifo_partition_1);
1347         writeq(val64, &bar0->tx_fifo_partition_2);
1348         writeq(val64, &bar0->tx_fifo_partition_3);
1349
1350
1351         for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1352                 val64 |=
1353                     vBIT(config->tx_cfg[i].fifo_len - 1, ((j * 32) + 19),
1354                          13) | vBIT(config->tx_cfg[i].fifo_priority,
1355                                     ((j * 32) + 5), 3);
1356
1357                 if (i == (config->tx_fifo_num - 1)) {
1358                         if (i % 2 == 0)
1359                                 i++;
1360                 }
1361
1362                 switch (i) {
1363                 case 1:
1364                         writeq(val64, &bar0->tx_fifo_partition_0);
1365                         val64 = 0;
1366                         j = 0;
1367                         break;
1368                 case 3:
1369                         writeq(val64, &bar0->tx_fifo_partition_1);
1370                         val64 = 0;
1371                         j = 0;
1372                         break;
1373                 case 5:
1374                         writeq(val64, &bar0->tx_fifo_partition_2);
1375                         val64 = 0;
1376                         j = 0;
1377                         break;
1378                 case 7:
1379                         writeq(val64, &bar0->tx_fifo_partition_3);
1380                         val64 = 0;
1381                         j = 0;
1382                         break;
1383                 default:
1384                         j++;
1385                         break;
1386                 }
1387         }
1388
1389         /*
1390          * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1391          * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1392          */
1393         if ((nic->device_type == XFRAME_I_DEVICE) &&
1394                 (nic->pdev->revision < 4))
1395                 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1396
1397         val64 = readq(&bar0->tx_fifo_partition_0);
1398         DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1399                   &bar0->tx_fifo_partition_0, (unsigned long long) val64);
1400
1401         /*
1402          * Initialization of Tx_PA_CONFIG register to ignore packet
1403          * integrity checking.
1404          */
1405         val64 = readq(&bar0->tx_pa_cfg);
1406         val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
1407             TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
1408         writeq(val64, &bar0->tx_pa_cfg);
1409
1410         /* Rx DMA intialization. */
1411         val64 = 0;
1412         for (i = 0; i < config->rx_ring_num; i++) {
1413                 val64 |=
1414                     vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
1415                          3);
1416         }
1417         writeq(val64, &bar0->rx_queue_priority);
1418
1419         /*
1420          * Allocating equal share of memory to all the
1421          * configured Rings.
1422          */
1423         val64 = 0;
1424         if (nic->device_type & XFRAME_II_DEVICE)
1425                 mem_size = 32;
1426         else
1427                 mem_size = 64;
1428
1429         for (i = 0; i < config->rx_ring_num; i++) {
1430                 switch (i) {
1431                 case 0:
1432                         mem_share = (mem_size / config->rx_ring_num +
1433                                      mem_size % config->rx_ring_num);
1434                         val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1435                         continue;
1436                 case 1:
1437                         mem_share = (mem_size / config->rx_ring_num);
1438                         val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1439                         continue;
1440                 case 2:
1441                         mem_share = (mem_size / config->rx_ring_num);
1442                         val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1443                         continue;
1444                 case 3:
1445                         mem_share = (mem_size / config->rx_ring_num);
1446                         val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1447                         continue;
1448                 case 4:
1449                         mem_share = (mem_size / config->rx_ring_num);
1450                         val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1451                         continue;
1452                 case 5:
1453                         mem_share = (mem_size / config->rx_ring_num);
1454                         val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1455                         continue;
1456                 case 6:
1457                         mem_share = (mem_size / config->rx_ring_num);
1458                         val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1459                         continue;
1460                 case 7:
1461                         mem_share = (mem_size / config->rx_ring_num);
1462                         val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1463                         continue;
1464                 }
1465         }
1466         writeq(val64, &bar0->rx_queue_cfg);
1467
1468         /*
1469          * Filling Tx round robin registers
1470          * as per the number of FIFOs for equal scheduling priority
1471          */
1472         switch (config->tx_fifo_num) {
1473         case 1:
1474                 val64 = 0x0;
1475                 writeq(val64, &bar0->tx_w_round_robin_0);
1476                 writeq(val64, &bar0->tx_w_round_robin_1);
1477                 writeq(val64, &bar0->tx_w_round_robin_2);
1478                 writeq(val64, &bar0->tx_w_round_robin_3);
1479                 writeq(val64, &bar0->tx_w_round_robin_4);
1480                 break;
1481         case 2:
1482                 val64 = 0x0001000100010001ULL;
1483                 writeq(val64, &bar0->tx_w_round_robin_0);
1484                 writeq(val64, &bar0->tx_w_round_robin_1);
1485                 writeq(val64, &bar0->tx_w_round_robin_2);
1486                 writeq(val64, &bar0->tx_w_round_robin_3);
1487                 val64 = 0x0001000100000000ULL;
1488                 writeq(val64, &bar0->tx_w_round_robin_4);
1489                 break;
1490         case 3:
1491                 val64 = 0x0001020001020001ULL;
1492                 writeq(val64, &bar0->tx_w_round_robin_0);
1493                 val64 = 0x0200010200010200ULL;
1494                 writeq(val64, &bar0->tx_w_round_robin_1);
1495                 val64 = 0x0102000102000102ULL;
1496                 writeq(val64, &bar0->tx_w_round_robin_2);
1497                 val64 = 0x0001020001020001ULL;
1498                 writeq(val64, &bar0->tx_w_round_robin_3);
1499                 val64 = 0x0200010200000000ULL;
1500                 writeq(val64, &bar0->tx_w_round_robin_4);
1501                 break;
1502         case 4:
1503                 val64 = 0x0001020300010203ULL;
1504                 writeq(val64, &bar0->tx_w_round_robin_0);
1505                 writeq(val64, &bar0->tx_w_round_robin_1);
1506                 writeq(val64, &bar0->tx_w_round_robin_2);
1507                 writeq(val64, &bar0->tx_w_round_robin_3);
1508                 val64 = 0x0001020300000000ULL;
1509                 writeq(val64, &bar0->tx_w_round_robin_4);
1510                 break;
1511         case 5:
1512                 val64 = 0x0001020304000102ULL;
1513                 writeq(val64, &bar0->tx_w_round_robin_0);
1514                 val64 = 0x0304000102030400ULL;
1515                 writeq(val64, &bar0->tx_w_round_robin_1);
1516                 val64 = 0x0102030400010203ULL;
1517                 writeq(val64, &bar0->tx_w_round_robin_2);
1518                 val64 = 0x0400010203040001ULL;
1519                 writeq(val64, &bar0->tx_w_round_robin_3);
1520                 val64 = 0x0203040000000000ULL;
1521                 writeq(val64, &bar0->tx_w_round_robin_4);
1522                 break;
1523         case 6:
1524                 val64 = 0x0001020304050001ULL;
1525                 writeq(val64, &bar0->tx_w_round_robin_0);
1526                 val64 = 0x0203040500010203ULL;
1527                 writeq(val64, &bar0->tx_w_round_robin_1);
1528                 val64 = 0x0405000102030405ULL;
1529                 writeq(val64, &bar0->tx_w_round_robin_2);
1530                 val64 = 0x0001020304050001ULL;
1531                 writeq(val64, &bar0->tx_w_round_robin_3);
1532                 val64 = 0x0203040500000000ULL;
1533                 writeq(val64, &bar0->tx_w_round_robin_4);
1534                 break;
1535         case 7:
1536                 val64 = 0x0001020304050600ULL;
1537                 writeq(val64, &bar0->tx_w_round_robin_0);
1538                 val64 = 0x0102030405060001ULL;
1539                 writeq(val64, &bar0->tx_w_round_robin_1);
1540                 val64 = 0x0203040506000102ULL;
1541                 writeq(val64, &bar0->tx_w_round_robin_2);
1542                 val64 = 0x0304050600010203ULL;
1543                 writeq(val64, &bar0->tx_w_round_robin_3);
1544                 val64 = 0x0405060000000000ULL;
1545                 writeq(val64, &bar0->tx_w_round_robin_4);
1546                 break;
1547         case 8:
1548                 val64 = 0x0001020304050607ULL;
1549                 writeq(val64, &bar0->tx_w_round_robin_0);
1550                 writeq(val64, &bar0->tx_w_round_robin_1);
1551                 writeq(val64, &bar0->tx_w_round_robin_2);
1552                 writeq(val64, &bar0->tx_w_round_robin_3);
1553                 val64 = 0x0001020300000000ULL;
1554                 writeq(val64, &bar0->tx_w_round_robin_4);
1555                 break;
1556         }
1557
1558         /* Enable all configured Tx FIFO partitions */
1559         val64 = readq(&bar0->tx_fifo_partition_0);
1560         val64 |= (TX_FIFO_PARTITION_EN);
1561         writeq(val64, &bar0->tx_fifo_partition_0);
1562
1563         /* Filling the Rx round robin registers as per the
1564          * number of Rings and steering based on QoS with
1565          * equal priority.
1566          */
1567         switch (config->rx_ring_num) {
1568         case 1:
1569                 val64 = 0x0;
1570                 writeq(val64, &bar0->rx_w_round_robin_0);
1571                 writeq(val64, &bar0->rx_w_round_robin_1);
1572                 writeq(val64, &bar0->rx_w_round_robin_2);
1573                 writeq(val64, &bar0->rx_w_round_robin_3);
1574                 writeq(val64, &bar0->rx_w_round_robin_4);
1575
1576                 val64 = 0x8080808080808080ULL;
1577                 writeq(val64, &bar0->rts_qos_steering);
1578                 break;
1579         case 2:
1580                 val64 = 0x0001000100010001ULL;
1581                 writeq(val64, &bar0->rx_w_round_robin_0);
1582                 writeq(val64, &bar0->rx_w_round_robin_1);
1583                 writeq(val64, &bar0->rx_w_round_robin_2);
1584                 writeq(val64, &bar0->rx_w_round_robin_3);
1585                 val64 = 0x0001000100000000ULL;
1586                 writeq(val64, &bar0->rx_w_round_robin_4);
1587
1588                 val64 = 0x8080808040404040ULL;
1589                 writeq(val64, &bar0->rts_qos_steering);
1590                 break;
1591         case 3:
1592                 val64 = 0x0001020001020001ULL;
1593                 writeq(val64, &bar0->rx_w_round_robin_0);
1594                 val64 = 0x0200010200010200ULL;
1595                 writeq(val64, &bar0->rx_w_round_robin_1);
1596                 val64 = 0x0102000102000102ULL;
1597                 writeq(val64, &bar0->rx_w_round_robin_2);
1598                 val64 = 0x0001020001020001ULL;
1599                 writeq(val64, &bar0->rx_w_round_robin_3);
1600                 val64 = 0x0200010200000000ULL;
1601                 writeq(val64, &bar0->rx_w_round_robin_4);
1602
1603                 val64 = 0x8080804040402020ULL;
1604                 writeq(val64, &bar0->rts_qos_steering);
1605                 break;
1606         case 4:
1607                 val64 = 0x0001020300010203ULL;
1608                 writeq(val64, &bar0->rx_w_round_robin_0);
1609                 writeq(val64, &bar0->rx_w_round_robin_1);
1610                 writeq(val64, &bar0->rx_w_round_robin_2);
1611                 writeq(val64, &bar0->rx_w_round_robin_3);
1612                 val64 = 0x0001020300000000ULL;
1613                 writeq(val64, &bar0->rx_w_round_robin_4);
1614
1615                 val64 = 0x8080404020201010ULL;
1616                 writeq(val64, &bar0->rts_qos_steering);
1617                 break;
1618         case 5:
1619                 val64 = 0x0001020304000102ULL;
1620                 writeq(val64, &bar0->rx_w_round_robin_0);
1621                 val64 = 0x0304000102030400ULL;
1622                 writeq(val64, &bar0->rx_w_round_robin_1);
1623                 val64 = 0x0102030400010203ULL;
1624                 writeq(val64, &bar0->rx_w_round_robin_2);
1625                 val64 = 0x0400010203040001ULL;
1626                 writeq(val64, &bar0->rx_w_round_robin_3);
1627                 val64 = 0x0203040000000000ULL;
1628                 writeq(val64, &bar0->rx_w_round_robin_4);
1629
1630                 val64 = 0x8080404020201008ULL;
1631                 writeq(val64, &bar0->rts_qos_steering);
1632                 break;
1633         case 6:
1634                 val64 = 0x0001020304050001ULL;
1635                 writeq(val64, &bar0->rx_w_round_robin_0);
1636                 val64 = 0x0203040500010203ULL;
1637                 writeq(val64, &bar0->rx_w_round_robin_1);
1638                 val64 = 0x0405000102030405ULL;
1639                 writeq(val64, &bar0->rx_w_round_robin_2);
1640                 val64 = 0x0001020304050001ULL;
1641                 writeq(val64, &bar0->rx_w_round_robin_3);
1642                 val64 = 0x0203040500000000ULL;
1643                 writeq(val64, &bar0->rx_w_round_robin_4);
1644
1645                 val64 = 0x8080404020100804ULL;
1646                 writeq(val64, &bar0->rts_qos_steering);
1647                 break;
1648         case 7:
1649                 val64 = 0x0001020304050600ULL;
1650                 writeq(val64, &bar0->rx_w_round_robin_0);
1651                 val64 = 0x0102030405060001ULL;
1652                 writeq(val64, &bar0->rx_w_round_robin_1);
1653                 val64 = 0x0203040506000102ULL;
1654                 writeq(val64, &bar0->rx_w_round_robin_2);
1655                 val64 = 0x0304050600010203ULL;
1656                 writeq(val64, &bar0->rx_w_round_robin_3);
1657                 val64 = 0x0405060000000000ULL;
1658                 writeq(val64, &bar0->rx_w_round_robin_4);
1659
1660                 val64 = 0x8080402010080402ULL;
1661                 writeq(val64, &bar0->rts_qos_steering);
1662                 break;
1663         case 8:
1664                 val64 = 0x0001020304050607ULL;
1665                 writeq(val64, &bar0->rx_w_round_robin_0);
1666                 writeq(val64, &bar0->rx_w_round_robin_1);
1667                 writeq(val64, &bar0->rx_w_round_robin_2);
1668                 writeq(val64, &bar0->rx_w_round_robin_3);
1669                 val64 = 0x0001020300000000ULL;
1670                 writeq(val64, &bar0->rx_w_round_robin_4);
1671
1672                 val64 = 0x8040201008040201ULL;
1673                 writeq(val64, &bar0->rts_qos_steering);
1674                 break;
1675         }
1676
1677         /* UDP Fix */
1678         val64 = 0;
1679         for (i = 0; i < 8; i++)
1680                 writeq(val64, &bar0->rts_frm_len_n[i]);
1681
1682         /* Set the default rts frame length for the rings configured */
1683         val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1684         for (i = 0 ; i < config->rx_ring_num ; i++)
1685                 writeq(val64, &bar0->rts_frm_len_n[i]);
1686
1687         /* Set the frame length for the configured rings
1688          * desired by the user
1689          */
1690         for (i = 0; i < config->rx_ring_num; i++) {
1691                 /* If rts_frm_len[i] == 0 then it is assumed that user not
1692                  * specified frame length steering.
1693                  * If the user provides the frame length then program
1694                  * the rts_frm_len register for those values or else
1695                  * leave it as it is.
1696                  */
1697                 if (rts_frm_len[i] != 0) {
1698                         writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1699                                 &bar0->rts_frm_len_n[i]);
1700                 }
1701         }
1702
1703         /* Disable differentiated services steering logic */
1704         for (i = 0; i < 64; i++) {
1705                 if (rts_ds_steer(nic, i, 0) == FAILURE) {
1706                         DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
1707                                 dev->name);
1708                         DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
1709                         return -ENODEV;
1710                 }
1711         }
1712
1713         /* Program statistics memory */
1714         writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1715
1716         if (nic->device_type == XFRAME_II_DEVICE) {
1717                 val64 = STAT_BC(0x320);
1718                 writeq(val64, &bar0->stat_byte_cnt);
1719         }
1720
1721         /*
1722          * Initializing the sampling rate for the device to calculate the
1723          * bandwidth utilization.
1724          */
1725         val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1726             MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1727         writeq(val64, &bar0->mac_link_util);
1728
1729         /*
1730          * Initializing the Transmit and Receive Traffic Interrupt
1731          * Scheme.
1732          */
1733
1734         /* Initialize TTI */
1735         if (SUCCESS != init_tti(nic, nic->last_link_state))
1736                 return -ENODEV;
1737
1738         /* RTI Initialization */
1739         if (nic->device_type == XFRAME_II_DEVICE) {
1740                 /*
1741                  * Programmed to generate Apprx 500 Intrs per
1742                  * second
1743                  */
1744                 int count = (nic->config.bus_speed * 125)/4;
1745                 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1746         } else
1747                 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1748         val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1749                  RTI_DATA1_MEM_RX_URNG_B(0x10) |
1750                  RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1751
1752         writeq(val64, &bar0->rti_data1_mem);
1753
1754         val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1755                 RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1756         if (nic->config.intr_type == MSI_X)
1757             val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1758                         RTI_DATA2_MEM_RX_UFC_D(0x40));
1759         else
1760             val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1761                         RTI_DATA2_MEM_RX_UFC_D(0x80));
1762         writeq(val64, &bar0->rti_data2_mem);
1763
1764         for (i = 0; i < config->rx_ring_num; i++) {
1765                 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1766                                 | RTI_CMD_MEM_OFFSET(i);
1767                 writeq(val64, &bar0->rti_command_mem);
1768
1769                 /*
1770                  * Once the operation completes, the Strobe bit of the
1771                  * command register will be reset. We poll for this
1772                  * particular condition. We wait for a maximum of 500ms
1773                  * for the operation to complete, if it's not complete
1774                  * by then we return error.
1775                  */
1776                 time = 0;
1777                 while (TRUE) {
1778                         val64 = readq(&bar0->rti_command_mem);
1779                         if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
1780                                 break;
1781
1782                         if (time > 10) {
1783                                 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1784                                           dev->name);
1785                                 return -ENODEV;
1786                         }
1787                         time++;
1788                         msleep(50);
1789                 }
1790         }
1791
1792         /*
1793          * Initializing proper values as Pause threshold into all
1794          * the 8 Queues on Rx side.
1795          */
1796         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1797         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1798
1799         /* Disable RMAC PAD STRIPPING */
1800         add = &bar0->mac_cfg;
1801         val64 = readq(&bar0->mac_cfg);
1802         val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1803         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1804         writel((u32) (val64), add);
1805         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1806         writel((u32) (val64 >> 32), (add + 4));
1807         val64 = readq(&bar0->mac_cfg);
1808
1809         /* Enable FCS stripping by adapter */
1810         add = &bar0->mac_cfg;
1811         val64 = readq(&bar0->mac_cfg);
1812         val64 |= MAC_CFG_RMAC_STRIP_FCS;
1813         if (nic->device_type == XFRAME_II_DEVICE)
1814                 writeq(val64, &bar0->mac_cfg);
1815         else {
1816                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1817                 writel((u32) (val64), add);
1818                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1819                 writel((u32) (val64 >> 32), (add + 4));
1820         }
1821
1822         /*
1823          * Set the time value to be inserted in the pause frame
1824          * generated by xena.
1825          */
1826         val64 = readq(&bar0->rmac_pause_cfg);
1827         val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1828         val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1829         writeq(val64, &bar0->rmac_pause_cfg);
1830
1831         /*
1832          * Set the Threshold Limit for Generating the pause frame
1833          * If the amount of data in any Queue exceeds ratio of
1834          * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1835          * pause frame is generated
1836          */
1837         val64 = 0;
1838         for (i = 0; i < 4; i++) {
1839                 val64 |=
1840                     (((u64) 0xFF00 | nic->mac_control.
1841                       mc_pause_threshold_q0q3)
1842                      << (i * 2 * 8));
1843         }
1844         writeq(val64, &bar0->mc_pause_thresh_q0q3);
1845
1846         val64 = 0;
1847         for (i = 0; i < 4; i++) {
1848                 val64 |=
1849                     (((u64) 0xFF00 | nic->mac_control.
1850                       mc_pause_threshold_q4q7)
1851                      << (i * 2 * 8));
1852         }
1853         writeq(val64, &bar0->mc_pause_thresh_q4q7);
1854
1855         /*
1856          * TxDMA will stop Read request if the number of read split has
1857          * exceeded the limit pointed by shared_splits
1858          */
1859         val64 = readq(&bar0->pic_control);
1860         val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1861         writeq(val64, &bar0->pic_control);
1862
1863         if (nic->config.bus_speed == 266) {
1864                 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1865                 writeq(0x0, &bar0->read_retry_delay);
1866                 writeq(0x0, &bar0->write_retry_delay);
1867         }
1868
1869         /*
1870          * Programming the Herc to split every write transaction
1871          * that does not start on an ADB to reduce disconnects.
1872          */
1873         if (nic->device_type == XFRAME_II_DEVICE) {
1874                 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1875                         MISC_LINK_STABILITY_PRD(3);
1876                 writeq(val64, &bar0->misc_control);
1877                 val64 = readq(&bar0->pic_control2);
1878                 val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
1879                 writeq(val64, &bar0->pic_control2);
1880         }
1881         if (strstr(nic->product_name, "CX4")) {
1882                 val64 = TMAC_AVG_IPG(0x17);
1883                 writeq(val64, &bar0->tmac_avg_ipg);
1884         }
1885
1886         return SUCCESS;
1887 }
1888 #define LINK_UP_DOWN_INTERRUPT          1
1889 #define MAC_RMAC_ERR_TIMER              2
1890
1891 static int s2io_link_fault_indication(struct s2io_nic *nic)
1892 {
1893         if (nic->config.intr_type != INTA)
1894                 return MAC_RMAC_ERR_TIMER;
1895         if (nic->device_type == XFRAME_II_DEVICE)
1896                 return LINK_UP_DOWN_INTERRUPT;
1897         else
1898                 return MAC_RMAC_ERR_TIMER;
1899 }
1900
1901 /**
1902  *  do_s2io_write_bits -  update alarm bits in alarm register
1903  *  @value: alarm bits
1904  *  @flag: interrupt status
1905  *  @addr: address value
1906  *  Description: update alarm bits in alarm register
1907  *  Return Value:
1908  *  NONE.
1909  */
1910 static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1911 {
1912         u64 temp64;
1913
1914         temp64 = readq(addr);
1915
1916         if(flag == ENABLE_INTRS)
1917                 temp64 &= ~((u64) value);
1918         else
1919                 temp64 |= ((u64) value);
1920         writeq(temp64, addr);
1921 }
1922
/*
 * en_dis_err_alarms - mask or unmask the per-block error alarm sources.
 * @nic: device private variable
 * @mask: bitmap (TX_DMA_INTR, TX_MAC_INTR, RX_DMA_INTR, ...) selecting
 *        which alarm blocks to modify
 * @flag: passed through to do_s2io_write_bits(); ENABLE_INTRS unmasks
 *        the listed alarms, any other value masks them
 *
 * For every block selected in @mask this programs the block's error-mask
 * register(s) via do_s2io_write_bits() and accumulates the block's
 * top-level interrupt bit into nic->general_int_mask.
 */
static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 gen_int_mask = 0;

	/* Tx DMA block: TDA, PFC, PCC, TTI, LSO, TPA and SM alarm sources */
	if (mask & TX_DMA_INTR) {

		gen_int_mask |= TXDMA_INT_M;

		do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
				TXDMA_PCC_INT | TXDMA_TTI_INT |
				TXDMA_LSO_INT | TXDMA_TPA_INT |
				TXDMA_SM_INT, flag, &bar0->txdma_int_mask);

		do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
				PFC_MISC_0_ERR | PFC_MISC_1_ERR |
				PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
				&bar0->pfc_err_mask);

		do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
				TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
				TDA_PCIX_ERR, flag, &bar0->tda_err_mask);

		do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
				PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
				PCC_N_SERR | PCC_6_COF_OV_ERR |
				PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
				PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
				PCC_TXB_ECC_SG_ERR, flag, &bar0->pcc_err_mask);

		do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
				TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);

		do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
				LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
				LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
				flag, &bar0->lso_err_mask);

		do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
				flag, &bar0->tpa_err_mask);

		do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);

	}

	/* Tx MAC block: TMAC buffer, state-machine and ECC alarms */
	if (mask & TX_MAC_INTR) {
		gen_int_mask |= TXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
				&bar0->mac_int_mask);
		do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
				TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
				TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
				flag, &bar0->mac_tmac_err_mask);
	}

	/* Tx XGXS block alarms */
	if (mask & TX_XGXS_INTR) {
		gen_int_mask |= TXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
				&bar0->xgxs_int_mask);
		do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
				TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
				flag, &bar0->xgxs_txgxs_err_mask);
	}

	/* Rx DMA block: RC, PRC, RPA, RDA and RTI alarm sources */
	if (mask & RX_DMA_INTR) {
		gen_int_mask |= RXDMA_INT_M;
		do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
				RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
				flag, &bar0->rxdma_int_mask);
		do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
				RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
				RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
				RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
		do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
				PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
				PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
				&bar0->prc_pcix_err_mask);
		do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
				RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
				&bar0->rpa_err_mask);
		do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
				RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
				RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
				RDA_FRM_ECC_SG_ERR | RDA_MISC_ERR|RDA_PCIX_ERR,
				flag, &bar0->rda_err_mask);
		do_s2io_write_bits(RTI_SM_ERR_ALARM |
				RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
				flag, &bar0->rti_err_mask);
	}

	/* Rx MAC block: RMAC buffer, ECC and link-state alarms */
	if (mask & RX_MAC_INTR) {
		gen_int_mask |= RXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
				&bar0->mac_int_mask);
		do_s2io_write_bits(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
				RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
				RMAC_DOUBLE_ECC_ERR |
				RMAC_LINK_STATE_CHANGE_INT,
				flag, &bar0->mac_rmac_err_mask);
	}

	/* Rx XGXS block alarms */
	if (mask & RX_XGXS_INTR)
	{
		gen_int_mask |= RXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
				&bar0->xgxs_int_mask);
		do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
				&bar0->xgxs_rxgxs_err_mask);
	}

	/* Memory controller: ECC and PLL-lock alarms */
	if (mask & MC_INTR) {
		gen_int_mask |= MC_INT_M;
		do_s2io_write_bits(MC_INT_MASK_MC_INT, flag, &bar0->mc_int_mask);
		do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
				MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
				&bar0->mc_err_mask);
	}
	nic->general_int_mask = gen_int_mask;

	/* Remove this line when alarm interrupts are enabled */
	nic->general_int_mask = 0;
}
2045 /**
2046  *  en_dis_able_nic_intrs - Enable or Disable the interrupts
2047  *  @nic: device private variable,
2048  *  @mask: A mask indicating which Intr block must be modified and,
2049  *  @flag: A flag indicating whether to enable or disable the Intrs.
2050  *  Description: This function will either disable or enable the interrupts
2051  *  depending on the flag argument. The mask argument can be used to
2052  *  enable/disable any Intr block.
2053  *  Return Value: NONE.
2054  */
2055
2056 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
2057 {
2058         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2059         register u64 temp64 = 0, intr_mask = 0;
2060
2061         intr_mask = nic->general_int_mask;
2062
2063         /*  Top level interrupt classification */
2064         /*  PIC Interrupts */
2065         if (mask & TX_PIC_INTR) {
2066                 /*  Enable PIC Intrs in the general intr mask register */
2067                 intr_mask |= TXPIC_INT_M;
2068                 if (flag == ENABLE_INTRS) {
2069                         /*
2070                          * If Hercules adapter enable GPIO otherwise
2071                          * disable all PCIX, Flash, MDIO, IIC and GPIO
2072                          * interrupts for now.
2073                          * TODO
2074                          */
2075                         if (s2io_link_fault_indication(nic) ==
2076                                         LINK_UP_DOWN_INTERRUPT ) {
2077                                 do_s2io_write_bits(PIC_INT_GPIO, flag,
2078                                                 &bar0->pic_int_mask);
2079                                 do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
2080                                                 &bar0->gpio_int_mask);
2081                         } else
2082                                 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2083                 } else if (flag == DISABLE_INTRS) {
2084                         /*
2085                          * Disable PIC Intrs in the general
2086                          * intr mask register
2087                          */
2088                         writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2089                 }
2090         }
2091
2092         /*  Tx traffic interrupts */
2093         if (mask & TX_TRAFFIC_INTR) {
2094                 intr_mask |= TXTRAFFIC_INT_M;
2095                 if (flag == ENABLE_INTRS) {
2096                         /*
2097                          * Enable all the Tx side interrupts
2098                          * writing 0 Enables all 64 TX interrupt levels
2099                          */
2100                         writeq(0x0, &bar0->tx_traffic_mask);
2101                 } else if (flag == DISABLE_INTRS) {
2102                         /*
2103                          * Disable Tx Traffic Intrs in the general intr mask
2104                          * register.
2105                          */
2106                         writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
2107                 }
2108         }
2109
2110         /*  Rx traffic interrupts */
2111         if (mask & RX_TRAFFIC_INTR) {
2112                 intr_mask |= RXTRAFFIC_INT_M;
2113                 if (flag == ENABLE_INTRS) {
2114                         /* writing 0 Enables all 8 RX interrupt levels */
2115                         writeq(0x0, &bar0->rx_traffic_mask);
2116                 } else if (flag == DISABLE_INTRS) {
2117                         /*
2118                          * Disable Rx Traffic Intrs in the general intr mask
2119                          * register.
2120                          */
2121                         writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
2122                 }
2123         }
2124
2125         temp64 = readq(&bar0->general_int_mask);
2126         if (flag == ENABLE_INTRS)
2127                 temp64 &= ~((u64) intr_mask);
2128         else
2129                 temp64 = DISABLE_ALL_INTRS;
2130         writeq(temp64, &bar0->general_int_mask);
2131
2132         nic->general_int_mask = readq(&bar0->general_int_mask);
2133 }
2134
2135 /**
2136  *  verify_pcc_quiescent- Checks for PCC quiescent state
2137  *  Return: 1 If PCC is quiescence
2138  *          0 If PCC is not quiescence
2139  */
2140 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
2141 {
2142         int ret = 0, herc;
2143         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2144         u64 val64 = readq(&bar0->adapter_status);
2145
2146         herc = (sp->device_type == XFRAME_II_DEVICE);
2147
2148         if (flag == FALSE) {
2149                 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2150                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
2151                                 ret = 1;
2152                 } else {
2153                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2154                                 ret = 1;
2155                 }
2156         } else {
2157                 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2158                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
2159                              ADAPTER_STATUS_RMAC_PCC_IDLE))
2160                                 ret = 1;
2161                 } else {
2162                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
2163                              ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2164                                 ret = 1;
2165                 }
2166         }
2167
2168         return ret;
2169 }
2170 /**
2171  *  verify_xena_quiescence - Checks whether the H/W is ready
2172  *  Description: Returns whether the H/W is ready to go or not. Depending
2173  *  on whether adapter enable bit was written or not the comparison
2174  *  differs and the calling function passes the input argument flag to
2175  *  indicate this.
2176  *  Return: 1 If xena is quiescence
2177  *          0 If Xena is not quiescence
2178  */
2179
2180 static int verify_xena_quiescence(struct s2io_nic *sp)
2181 {
2182         int  mode;
2183         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2184         u64 val64 = readq(&bar0->adapter_status);
2185         mode = s2io_verify_pci_mode(sp);
2186
2187         if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2188                 DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
2189                 return 0;
2190         }
2191         if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2192         DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
2193                 return 0;
2194         }
2195         if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2196                 DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
2197                 return 0;
2198         }
2199         if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2200                 DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
2201                 return 0;
2202         }
2203         if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2204                 DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
2205                 return 0;
2206         }
2207         if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2208                 DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
2209                 return 0;
2210         }
2211         if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2212                 DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
2213                 return 0;
2214         }
2215         if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2216                 DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
2217                 return 0;
2218         }
2219
2220         /*
2221          * In PCI 33 mode, the P_PLL is not used, and therefore,
2222          * the the P_PLL_LOCK bit in the adapter_status register will
2223          * not be asserted.
2224          */
2225         if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2226                 sp->device_type == XFRAME_II_DEVICE && mode !=
2227                 PCI_MODE_PCI_33) {
2228                 DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
2229                 return 0;
2230         }
2231         if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2232                         ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2233                 DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
2234                 return 0;
2235         }
2236         return 1;
2237 }
2238
2239 /**
2240  * fix_mac_address -  Fix for Mac addr problem on Alpha platforms
2241  * @sp: Pointer to device specifc structure
2242  * Description :
2243  * New procedure to clear mac address reading  problems on Alpha platforms
2244  *
2245  */
2246
2247 static void fix_mac_address(struct s2io_nic * sp)
2248 {
2249         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2250         u64 val64;
2251         int i = 0;
2252
2253         while (fix_mac[i] != END_SIGN) {
2254                 writeq(fix_mac[i++], &bar0->gpio_control);
2255                 udelay(10);
2256                 val64 = readq(&bar0->gpio_control);
2257         }
2258 }
2259
2260 /**
2261  *  start_nic - Turns the device on
2262  *  @nic : device private variable.
2263  *  Description:
2264  *  This function actually turns the device on. Before this  function is
2265  *  called,all Registers are configured from their reset states
2266  *  and shared memory is allocated but the NIC is still quiescent. On
2267  *  calling this function, the device interrupts are cleared and the NIC is
2268  *  literally switched on by writing into the adapter control register.
2269  *  Return Value:
2270  *  SUCCESS on success and -1 on failure.
2271  */
2272
static int start_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	u16 subid, i;
	struct mac_info *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/*  PRC Initialization and configuration */
	/* Point each ring's PRC at its first Rx block and program the
	 * receive mode and RxD backoff interval. */
	for (i = 0; i < config->rx_ring_num; i++) {
		writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
		       &bar0->prc_rxd0_n[i]);

		val64 = readq(&bar0->prc_ctrl_n[i]);
		if (nic->rxd_mode == RXD_MODE_1)
			val64 |= PRC_CTRL_RC_ENABLED;
		else
			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
		if (nic->device_type == XFRAME_II_DEVICE)
			val64 |= PRC_CTRL_GROUP_READS;
		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
		writeq(val64, &bar0->prc_ctrl_n[i]);
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
		writeq(val64, &bar0->rx_pa_cfg);
	}

	/* Honour the module parameter that disables VLAN tag stripping. */
	if (vlan_tag_strip == 0) {
		val64 = readq(&bar0->rx_pa_cfg);
		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
		writeq(val64, &bar0->rx_pa_cfg);
		vlan_strip_flag = 0;
	}

	/*
	 * Enabling MC-RLDRAM. After enabling the device, we timeout
	 * for around 100ms, which is approximately the time required
	 * for the device to be ready for operation.
	 */
	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
	val64 = readq(&bar0->mc_rldram_mrs);

	msleep(100);	/* Delay by around 100 ms. */

	/* Enabling ECC Protection. */
	/* NOTE(review): this CLEARS ADAPTER_ECC_EN; the comment above may
	 * be describing intent rather than the actual bit polarity —
	 * confirm against the Xframe register specification. */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/*
	 * Verify if the device is ready to be enabled, if so enable
	 * it.
	 */
	val64 = readq(&bar0->adapter_status);
	if (!verify_xena_quiescence(nic)) {
		DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
		DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
			  (unsigned long long) val64);
		return FAILURE;
	}

	/*
	 * With some switches, link might be already up at this point.
	 * Because of this weird behavior, when we enable laser,
	 * we may not get link. We need to handle this. We cannot
	 * figure out which switch is misbehaving. So we are forced to
	 * make a global change.
	 */

	/* Enabling Laser. */
	val64 = readq(&bar0->adapter_control);
	val64 |= ADAPTER_EOI_TX_ON;
	writeq(val64, &bar0->adapter_control);

	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Dont see link state interrupts initally on some switches,
		 * so directly scheduling the link state task here.
		 */
		schedule_work(&nic->set_link_task);
	}
	/* SXE-002: Initialize link and activity LED */
	subid = nic->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (nic->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	return SUCCESS;
}
2378 /**
2379  * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2380  */
2381 static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, struct \
2382                                         TxD *txdlp, int get_off)
2383 {
2384         struct s2io_nic *nic = fifo_data->nic;
2385         struct sk_buff *skb;
2386         struct TxD *txds;
2387         u16 j, frg_cnt;
2388
2389         txds = txdlp;
2390         if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
2391                 pci_unmap_single(nic->pdev, (dma_addr_t)
2392                         txds->Buffer_Pointer, sizeof(u64),
2393                         PCI_DMA_TODEVICE);
2394                 txds++;
2395         }
2396
2397         skb = (struct sk_buff *) ((unsigned long)
2398                         txds->Host_Control);
2399         if (!skb) {
2400                 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2401                 return NULL;
2402         }
2403         pci_unmap_single(nic->pdev, (dma_addr_t)
2404                          txds->Buffer_Pointer,
2405                          skb->len - skb->data_len,
2406                          PCI_DMA_TODEVICE);
2407         frg_cnt = skb_shinfo(skb)->nr_frags;
2408         if (frg_cnt) {
2409                 txds++;
2410                 for (j = 0; j < frg_cnt; j++, txds++) {
2411                         skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2412                         if (!txds->Buffer_Pointer)
2413                                 break;
2414                         pci_unmap_page(nic->pdev, (dma_addr_t)
2415                                         txds->Buffer_Pointer,
2416                                        frag->size, PCI_DMA_TODEVICE);
2417                 }
2418         }
2419         memset(txdlp,0, (sizeof(struct TxD) * fifo_data->max_txds));
2420         return(skb);
2421 }
2422
2423 /**
2424  *  free_tx_buffers - Free all queued Tx buffers
2425  *  @nic : device private variable.
2426  *  Description:
2427  *  Free all queued Tx buffers.
2428  *  Return Value: void
2429 */
2430
2431 static void free_tx_buffers(struct s2io_nic *nic)
2432 {
2433         struct net_device *dev = nic->dev;
2434         struct sk_buff *skb;
2435         struct TxD *txdp;
2436         int i, j;
2437         struct mac_info *mac_control;
2438         struct config_param *config;
2439         int cnt = 0;
2440
2441         mac_control = &nic->mac_control;
2442         config = &nic->config;
2443
2444         for (i = 0; i < config->tx_fifo_num; i++) {
2445                 unsigned long flags;
2446                 spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags);
2447                 for (j = 0; j < config->tx_cfg[i].fifo_len; j++) {
2448                         txdp = (struct TxD *) \
2449                         mac_control->fifos[i].list_info[j].list_virt_addr;
2450                         skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2451                         if (skb) {
2452                                 nic->mac_control.stats_info->sw_stat.mem_freed
2453                                         += skb->truesize;
2454                                 dev_kfree_skb(skb);
2455                                 cnt++;
2456                         }
2457                 }
2458                 DBG_PRINT(INTR_DBG,
2459                           "%s:forcibly freeing %d skbs on FIFO%d\n",
2460                           dev->name, cnt, i);
2461                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2462                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2463                 spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock, flags);
2464         }
2465 }
2466
2467 /**
2468  *   stop_nic -  To stop the nic
2469  *   @nic ; device private variable.
2470  *   Description:
2471  *   This function does exactly the opposite of what the start_nic()
2472  *   function does. This function is called to stop the device.
2473  *   Return Value:
2474  *   void.
2475  */
2476
2477 static void stop_nic(struct s2io_nic *nic)
2478 {
2479         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2480         register u64 val64 = 0;
2481         u16 interruptible;
2482         struct mac_info *mac_control;
2483         struct config_param *config;
2484
2485         mac_control = &nic->mac_control;
2486         config = &nic->config;
2487
2488         /*  Disable all interrupts */
2489         en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2490         interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2491         interruptible |= TX_PIC_INTR;
2492         en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2493
2494         /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2495         val64 = readq(&bar0->adapter_control);
2496         val64 &= ~(ADAPTER_CNTL_EN);
2497         writeq(val64, &bar0->adapter_control);
2498 }
2499
2500 /**
2501  *  fill_rx_buffers - Allocates the Rx side skbs
2502  *  @ring_info: per ring structure
2503  *  Description:
2504  *  The function allocates Rx side skbs and puts the physical
2505  *  address of these buffers into the RxD buffer pointers, so that the NIC
2506  *  can DMA the received frame into these locations.
2507  *  The NIC supports 3 receive modes, viz
2508  *  1. single buffer,
2509  *  2. three buffer and
2510  *  3. Five buffer modes.
2511  *  Each mode defines how many fragments the received frame will be split
2512  *  up into by the NIC. The frame is split into L3 header, L4 Header,
2513  *  L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2514  *  is split into 3 fragments. As of now only single buffer mode is
2515  *  supported.
2516  *   Return Value:
2517  *  SUCCESS on success or an appropriate -ve value on failure.
2518  */
2519
static int fill_rx_buffers(struct ring_info *ring)
{
	struct sk_buff *skb;
	struct RxD_t *rxdp;
	int off, size, block_no, block_no1;
	u32 alloc_tab = 0;
	u32 alloc_cnt;
	u64 tmp;
	struct buffAdd *ba;
	struct RxD_t *first_rxdp = NULL;
	u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
	int rxd_index = 0;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;
	struct swStat *stats = &ring->nic->mac_control.stats_info->sw_stat;

	/* Number of empty RxD slots that need a fresh skb. */
	alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;

	block_no1 = ring->rx_curr_get_info.block_index;
	while (alloc_tab < alloc_cnt) {
		block_no = ring->rx_curr_put_info.block_index;

		off = ring->rx_curr_put_info.offset;

		rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;

		rxd_index = off + 1;
		if (block_no)
			rxd_index += (block_no * ring->rxd_count);

		/* Put pointer has wrapped around to the get pointer and the
		 * descriptor still holds an skb: the ring is full. */
		if ((block_no == block_no1) &&
			(off == ring->rx_curr_get_info.offset) &&
			(rxdp->Host_Control)) {
			DBG_PRINT(INTR_DBG, "%s: Get and Put",
				ring->dev->name);
			DBG_PRINT(INTR_DBG, " info equated\n");
			goto end;
		}
		/* End of a block reached: advance to the next block,
		 * wrapping to block 0 at the end of the ring. */
		if (off && (off == ring->rxd_count)) {
			ring->rx_curr_put_info.block_index++;
			if (ring->rx_curr_put_info.block_index ==
							ring->block_count)
				ring->rx_curr_put_info.block_index = 0;
			block_no = ring->rx_curr_put_info.block_index;
			off = 0;
			ring->rx_curr_put_info.offset = off;
			rxdp = ring->rx_blocks[block_no].block_virt_addr;
			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
				  ring->dev->name, rxdp);

		}

		/* Descriptor already owned by the adapter: nothing to do. */
		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
			((ring->rxd_mode == RXD_MODE_3B) &&
				(rxdp->Control_2 & s2BIT(0)))) {
			ring->rx_curr_put_info.offset = off;
			goto end;
		}
		/* calculate size of skb based on ring mode */
		size = ring->mtu + HEADER_ETHERNET_II_802_3_SIZE +
				HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
		if (ring->rxd_mode == RXD_MODE_1)
			size += NET_IP_ALIGN;
		else
			size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;

		/* allocate skb */
		skb = dev_alloc_skb(size);
		if(!skb) {
			DBG_PRINT(INFO_DBG, "%s: Out of ", ring->dev->name);
			DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n");
			/* Hand any batched descriptors to the adapter
			 * before bailing out. */
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			stats->mem_alloc_fail_cnt++;

			return -ENOMEM ;
		}
		stats->mem_allocated += skb->truesize;

		if (ring->rxd_mode == RXD_MODE_1) {
			/* 1 buffer mode - normal operation mode */
			rxdp1 = (struct RxD1*)rxdp;
			memset(rxdp, 0, sizeof(struct RxD1));
			skb_reserve(skb, NET_IP_ALIGN);
			rxdp1->Buffer0_ptr = pci_map_single
			    (ring->pdev, skb->data, size - NET_IP_ALIGN,
				PCI_DMA_FROMDEVICE);
			if( (rxdp1->Buffer0_ptr == 0) ||
				(rxdp1->Buffer0_ptr ==
				DMA_ERROR_CODE))
				goto pci_map_failed;

			rxdp->Control_2 =
				SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
			rxdp->Host_Control = (unsigned long) (skb);
		} else if (ring->rxd_mode == RXD_MODE_3B) {
			/*
			 * 2 buffer mode -
			 * 2 buffer mode provides 128
			 * byte aligned receive buffers.
			 */

			rxdp3 = (struct RxD3*)rxdp;
			/* save buffer pointers to avoid frequent dma mapping */
			Buffer0_ptr = rxdp3->Buffer0_ptr;
			Buffer1_ptr = rxdp3->Buffer1_ptr;
			memset(rxdp, 0, sizeof(struct RxD3));
			/* restore the buffer pointers for dma sync*/
			rxdp3->Buffer0_ptr = Buffer0_ptr;
			rxdp3->Buffer1_ptr = Buffer1_ptr;

			ba = &ring->ba[block_no][off];
			skb_reserve(skb, BUF0_LEN);
			/* Round skb->data up to the next ALIGN_SIZE
			 * boundary for the hardware. */
			tmp = (u64)(unsigned long) skb->data;
			tmp += ALIGN_SIZE;
			tmp &= ~ALIGN_SIZE;
			skb->data = (void *) (unsigned long)tmp;
			skb_reset_tail_pointer(skb);

			/* Buffer0 was saved above; map it only the first
			 * time, otherwise just sync it for the device. */
			if (!(rxdp3->Buffer0_ptr))
				rxdp3->Buffer0_ptr =
				   pci_map_single(ring->pdev, ba->ba_0,
					BUF0_LEN, PCI_DMA_FROMDEVICE);
			else
				pci_dma_sync_single_for_device(ring->pdev,
				(dma_addr_t) rxdp3->Buffer0_ptr,
				    BUF0_LEN, PCI_DMA_FROMDEVICE);
			if( (rxdp3->Buffer0_ptr == 0) ||
				(rxdp3->Buffer0_ptr == DMA_ERROR_CODE))
				goto pci_map_failed;

			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
			if (ring->rxd_mode == RXD_MODE_3B) {
				/* Two buffer mode */

				/*
				 * Buffer2 will have L3/L4 header plus
				 * L4 payload
				 */
				rxdp3->Buffer2_ptr = pci_map_single
				(ring->pdev, skb->data, ring->mtu + 4,
						PCI_DMA_FROMDEVICE);

				if( (rxdp3->Buffer2_ptr == 0) ||
					(rxdp3->Buffer2_ptr == DMA_ERROR_CODE))
					goto pci_map_failed;

				if (!rxdp3->Buffer1_ptr)
					rxdp3->Buffer1_ptr =
						pci_map_single(ring->pdev,
						ba->ba_1, BUF1_LEN,
						PCI_DMA_FROMDEVICE);

				if( (rxdp3->Buffer1_ptr == 0) ||
					(rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
					/* Undo the Buffer2 mapping done
					 * just above before failing. */
					pci_unmap_single
						(ring->pdev,
						(dma_addr_t)(unsigned long)
						skb->data,
						ring->mtu + 4,
						PCI_DMA_FROMDEVICE);
					goto pci_map_failed;
				}
				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
								(ring->mtu + 4);
			}
			rxdp->Control_2 |= s2BIT(0);
			rxdp->Host_Control = (unsigned long) (skb);
		}
		if (alloc_tab & ((1 << rxsync_frequency) - 1))
			rxdp->Control_1 |= RXD_OWN_XENA;
		off++;
		if (off == (ring->rxd_count + 1))
			off = 0;
		ring->rx_curr_put_info.offset = off;

		rxdp->Control_2 |= SET_RXD_MARKER;
		/* Batch ownership transfer: every rxsync_frequency-th
		 * descriptor, hand the previous batch leader to the
		 * adapter and start a new batch at this descriptor. */
		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			first_rxdp = rxdp;
		}
		ring->rx_bufs_left += 1;
		alloc_tab++;
	}

      end:
	/* Transfer ownership of first descriptor to adapter just before
	 * exiting. Before that, use memory barrier so that ownership
	 * and other fields are seen by adapter correctly.
	 */
	if (first_rxdp) {
		wmb();
		first_rxdp->Control_1 |= RXD_OWN_XENA;
	}

	return SUCCESS;
pci_map_failed:
	stats->pci_map_fail_cnt++;
	stats->mem_freed += skb->truesize;
	dev_kfree_skb_irq(skb);
	return -ENOMEM;
}
2728
2729 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2730 {
2731         struct net_device *dev = sp->dev;
2732         int j;
2733         struct sk_buff *skb;
2734         struct RxD_t *rxdp;
2735         struct mac_info *mac_control;
2736         struct buffAdd *ba;
2737         struct RxD1 *rxdp1;
2738         struct RxD3 *rxdp3;
2739
2740         mac_control = &sp->mac_control;
2741         for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2742                 rxdp = mac_control->rings[ring_no].
2743                                 rx_blocks[blk].rxds[j].virt_addr;
2744                 skb = (struct sk_buff *)
2745                         ((unsigned long) rxdp->Host_Control);
2746                 if (!skb) {
2747                         continue;
2748                 }
2749                 if (sp->rxd_mode == RXD_MODE_1) {
2750                         rxdp1 = (struct RxD1*)rxdp;
2751                         pci_unmap_single(sp->pdev, (dma_addr_t)
2752                                 rxdp1->Buffer0_ptr,
2753                                 dev->mtu +
2754                                 HEADER_ETHERNET_II_802_3_SIZE
2755                                 + HEADER_802_2_SIZE +
2756                                 HEADER_SNAP_SIZE,
2757                                 PCI_DMA_FROMDEVICE);
2758                         memset(rxdp, 0, sizeof(struct RxD1));
2759                 } else if(sp->rxd_mode == RXD_MODE_3B) {
2760                         rxdp3 = (struct RxD3*)rxdp;
2761                         ba = &mac_control->rings[ring_no].
2762                                 ba[blk][j];
2763                         pci_unmap_single(sp->pdev, (dma_addr_t)
2764                                 rxdp3->Buffer0_ptr,
2765                                 BUF0_LEN,
2766                                 PCI_DMA_FROMDEVICE);
2767                         pci_unmap_single(sp->pdev, (dma_addr_t)
2768                                 rxdp3->Buffer1_ptr,
2769                                 BUF1_LEN,
2770                                 PCI_DMA_FROMDEVICE);
2771                         pci_unmap_single(sp->pdev, (dma_addr_t)
2772                                 rxdp3->Buffer2_ptr,
2773                                 dev->mtu + 4,
2774                                 PCI_DMA_FROMDEVICE);
2775                         memset(rxdp, 0, sizeof(struct RxD3));
2776                 }
2777                 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
2778                 dev_kfree_skb(skb);
2779                 mac_control->rings[ring_no].rx_bufs_left -= 1;
2780         }
2781 }
2782
2783 /**
2784  *  free_rx_buffers - Frees all Rx buffers
2785  *  @sp: device private variable.
2786  *  Description:
2787  *  This function will free all Rx buffers allocated by host.
2788  *  Return Value:
2789  *  NONE.
2790  */
2791
2792 static void free_rx_buffers(struct s2io_nic *sp)
2793 {
2794         struct net_device *dev = sp->dev;
2795         int i, blk = 0, buf_cnt = 0;
2796         struct mac_info *mac_control;
2797         struct config_param *config;
2798
2799         mac_control = &sp->mac_control;
2800         config = &sp->config;
2801
2802         for (i = 0; i < config->rx_ring_num; i++) {
2803                 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2804                         free_rxd_blk(sp,i,blk);
2805
2806                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2807                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2808                 mac_control->rings[i].rx_curr_put_info.offset = 0;
2809                 mac_control->rings[i].rx_curr_get_info.offset = 0;
2810                 mac_control->rings[i].rx_bufs_left = 0;
2811                 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2812                           dev->name, buf_cnt, i);
2813         }
2814 }
2815
2816 /**
2817  * s2io_poll - Rx interrupt handler for NAPI support
2818  * @napi : pointer to the napi structure.
2819  * @budget : The number of packets that were budgeted to be processed
2820  * during  one pass through the 'Poll" function.
2821  * Description:
2822  * Comes into picture only if NAPI support has been incorporated. It does
2823  * the same thing that rx_intr_handler does, but not in a interrupt context
2824  * also It will process only a given number of packets.
2825  * Return value:
2826  * 0 on success and 1 if there are No Rx packets to be processed.
2827  */
2828
static int s2io_poll(struct napi_struct *napi, int budget)
{
	struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
	struct net_device *dev = nic->dev;
	int pkt_cnt = 0, org_pkts_to_process;
	struct mac_info *mac_control;
	struct config_param *config;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	int i;

	mac_control = &nic->mac_control;
	config = &nic->config;

	nic->pkts_to_process = budget;
	org_pkts_to_process = nic->pkts_to_process;

	/* Acknowledge pending Rx traffic interrupts; the readl flushes
	 * the posted write. */
	writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
	readl(&bar0->rx_traffic_int);

	/* Process each ring; rx_intr_handler decrements pkts_to_process. */
	for (i = 0; i < config->rx_ring_num; i++) {
		rx_intr_handler(&mac_control->rings[i]);
		pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
		if (!nic->pkts_to_process) {
			/* Quota for the current iteration has been met */
			goto no_rx;
		}
	}

	/* Budget not exhausted: leave polling mode before re-enabling
	 * interrupts below. */
	netif_rx_complete(dev, napi);

	/* Replenish the Rx descriptors consumed by the handlers above. */
	for (i = 0; i < config->rx_ring_num; i++) {
		if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) {
			DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
			DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
			break;
		}
	}
	/* Re enable the Rx interrupts. */
	writeq(0x0, &bar0->rx_traffic_mask);
	readl(&bar0->rx_traffic_mask);
	return pkt_cnt;

no_rx:
	/* Budget exhausted: refill but stay in polling mode (no
	 * netif_rx_complete, interrupts remain masked). */
	for (i = 0; i < config->rx_ring_num; i++) {
		if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) {
			DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
			DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
			break;
		}
	}
	return pkt_cnt;
}
2881
2882 #ifdef CONFIG_NET_POLL_CONTROLLER
2883 /**
2884  * s2io_netpoll - netpoll event handler entry point
2885  * @dev : pointer to the device structure.
2886  * Description:
2887  *      This function will be called by upper layer to check for events on the
2888  * interface in situations where interrupts are disabled. It is used for
2889  * specific in-kernel networking tasks, such as remote consoles and kernel
2890  * debugging over the network (example netdump in RedHat).
2891  */
2892 static void s2io_netpoll(struct net_device *dev)
2893 {
2894         struct s2io_nic *nic = dev->priv;
2895         struct mac_info *mac_control;
2896         struct config_param *config;
2897         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2898         u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2899         int i;
2900
2901         if (pci_channel_offline(nic->pdev))
2902                 return;
2903
2904         disable_irq(dev->irq);
2905
2906         mac_control = &nic->mac_control;
2907         config = &nic->config;
2908
2909         writeq(val64, &bar0->rx_traffic_int);
2910         writeq(val64, &bar0->tx_traffic_int);
2911
2912         /* we need to free up the transmitted skbufs or else netpoll will
2913          * run out of skbs and will fail and eventually netpoll application such
2914          * as netdump will fail.
2915          */
2916         for (i = 0; i < config->tx_fifo_num; i++)
2917                 tx_intr_handler(&mac_control->fifos[i]);
2918
2919         /* check for received packet and indicate up to network */
2920         for (i = 0; i < config->rx_ring_num; i++)
2921                 rx_intr_handler(&mac_control->rings[i]);
2922
2923         for (i = 0; i < config->rx_ring_num; i++) {
2924                 if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) {
2925                         DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2926                         DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
2927                         break;
2928                 }
2929         }
2930         enable_irq(dev->irq);
2931         return;
2932 }
2933 #endif
2934
2935 /**
2936  *  rx_intr_handler - Rx interrupt handler
 *  @ring_data: per-ring private data (struct ring_info) of the device.
2938  *  Description:
2939  *  If the interrupt is because of a received frame or if the
2940  *  receive ring contains fresh as yet un-processed frames,this function is
2941  *  called. It picks out the RxD at which place the last Rx processing had
2942  *  stopped and sends the skb to the OSM's Rx handler and then increments
2943  *  the offset.
2944  *  Return Value:
2945  *  NONE.
2946  */
static void rx_intr_handler(struct ring_info *ring_data)
{
	int get_block, put_block;
	struct rx_curr_get_info get_info, put_info;
	struct RxD_t *rxdp;
	struct sk_buff *skb;
	int pkt_cnt = 0;
	int i;
	struct RxD1* rxdp1;
	struct RxD3* rxdp3;

	/* Snapshot the consumer (get) and producer (put) ring positions. */
	get_info = ring_data->rx_curr_get_info;
	get_block = get_info.block_index;
	memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
	put_block = put_info.block_index;
	rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;

	/* Walk every descriptor the NIC has handed back to the host. */
	while (RXD_IS_UP2DT(rxdp)) {
		/*
		 * If the get pointer sits immediately behind the put
		 * pointer in the same block, the ring is full.
		 */
		if ((get_block == put_block) &&
		    (get_info.offset + 1) == put_info.offset) {
			DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
				ring_data->dev->name);
			break;
		}
		skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
		if (skb == NULL) {
			/* Descriptor with no skb attached: ring state is
			 * inconsistent, abandon processing entirely. */
			DBG_PRINT(ERR_DBG, "%s: The skb is ",
				  ring_data->dev->name);
			DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
			return;
		}
		/* Release the DMA mapping(s) according to the RxD mode. */
		if (ring_data->rxd_mode == RXD_MODE_1) {
			rxdp1 = (struct RxD1*)rxdp;
			pci_unmap_single(ring_data->pdev, (dma_addr_t)
				rxdp1->Buffer0_ptr,
				ring_data->mtu +
				HEADER_ETHERNET_II_802_3_SIZE +
				HEADER_802_2_SIZE +
				HEADER_SNAP_SIZE,
				PCI_DMA_FROMDEVICE);
		} else if (ring_data->rxd_mode == RXD_MODE_3B) {
			/* Mode 3B: sync the small Buffer0 region for CPU
			 * access and unmap the Buffer2 payload buffer. */
			rxdp3 = (struct RxD3*)rxdp;
			pci_dma_sync_single_for_cpu(ring_data->pdev, (dma_addr_t)
				rxdp3->Buffer0_ptr,
				BUF0_LEN, PCI_DMA_FROMDEVICE);
			pci_unmap_single(ring_data->pdev, (dma_addr_t)
				rxdp3->Buffer2_ptr,
				ring_data->mtu + 4,
				PCI_DMA_FROMDEVICE);
		}
		prefetch(skb->data);
		rx_osm_handler(ring_data, rxdp);
		/* Advance the consumer index, wrapping into the next
		 * block (and around the ring) when the block ends. */
		get_info.offset++;
		ring_data->rx_curr_get_info.offset = get_info.offset;
		rxdp = ring_data->rx_blocks[get_block].
				rxds[get_info.offset].virt_addr;
		if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
			get_info.offset = 0;
			ring_data->rx_curr_get_info.offset = get_info.offset;
			get_block++;
			if (get_block == ring_data->block_count)
				get_block = 0;
			ring_data->rx_curr_get_info.block_index = get_block;
			rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
		}

		/* Under NAPI, stop as soon as the budget is exhausted. */
		if(ring_data->nic->config.napi){
			ring_data->nic->pkts_to_process -= 1;
			if (!ring_data->nic->pkts_to_process)
				break;
		}
		pkt_cnt++;
		if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
			break;
	}
	if (ring_data->lro) {
		/* Flush and clear all open LRO sessions before exiting. */
		for (i=0; i<MAX_LRO_SESSIONS; i++) {
			struct lro *lro = &ring_data->lro0_n[i];
			if (lro->in_use) {
				update_L3L4_header(ring_data->nic, lro);
				queue_rx_frame(lro->parent, lro->vlan_tag);
				clear_lro_session(lro);
			}
		}
	}
}
3038
3039 /**
3040  *  tx_intr_handler - Transmit interrupt handler
 *  @fifo_data : fifo-specific private data of the device
3042  *  Description:
3043  *  If an interrupt was raised to indicate DMA complete of the
3044  *  Tx packet, this function is called. It identifies the last TxD
3045  *  whose buffer was freed and frees all skbs whose data have already
3046  *  DMA'ed into the NICs internal memory.
3047  *  Return Value:
3048  *  NONE
3049  */
3050
static void tx_intr_handler(struct fifo_info *fifo_data)
{
	struct s2io_nic *nic = fifo_data->nic;
	struct tx_curr_get_info get_info, put_info;
	struct sk_buff *skb = NULL;
	struct TxD *txdlp;
	int pkt_cnt = 0;
	unsigned long flags = 0;
	u8 err_mask;

	/* If another context already holds the fifo lock, let it do the
	 * reclaim; blocking here in interrupt context is not an option. */
	if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
			return;

	/* Snapshot the consumer (get) and producer (put) positions. */
	get_info = fifo_data->tx_curr_get_info;
	memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
	txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
	    list_virt_addr;
	/* Free every descriptor list the NIC no longer owns. */
	while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
	       (get_info.offset != put_info.offset) &&
	       (txdlp->Host_Control)) {
		/* Check for TxD errors */
		if (txdlp->Control_1 & TXD_T_CODE) {
			unsigned long long err;
			err = txdlp->Control_1 & TXD_T_CODE;
			/* Bit 0 of the t_code marks a parity error. */
			if (err & 0x1) {
				nic->mac_control.stats_info->sw_stat.
						parity_err_cnt++;
			}

			/* update t_code statistics */
			err_mask = err >> 48;
			switch(err_mask) {
				case 2:
					nic->mac_control.stats_info->sw_stat.
							tx_buf_abort_cnt++;
				break;

				case 3:
					nic->mac_control.stats_info->sw_stat.
							tx_desc_abort_cnt++;
				break;

				case 7:
					nic->mac_control.stats_info->sw_stat.
							tx_parity_err_cnt++;
				break;

				case 10:
					nic->mac_control.stats_info->sw_stat.
							tx_link_loss_cnt++;
				break;

				case 15:
					nic->mac_control.stats_info->sw_stat.
							tx_list_proc_err_cnt++;
				break;
			}
		}

		skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
		if (skb == NULL) {
			/* Inconsistent ring state: drop the lock before
			 * abandoning the reclaim entirely. */
			spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
			DBG_PRINT(ERR_DBG, "%s: Null skb ",
			__FUNCTION__);
			DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
			return;
		}
		pkt_cnt++;

		/* Updating the statistics block */
		nic->stats.tx_bytes += skb->len;
		nic->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
		dev_kfree_skb_irq(skb);

		/* Advance the consumer index, wrapping at the fifo end. */
		get_info.offset++;
		if (get_info.offset == get_info.fifo_len + 1)
			get_info.offset = 0;
		txdlp = (struct TxD *) fifo_data->list_info
		    [get_info.offset].list_virt_addr;
		fifo_data->tx_curr_get_info.offset =
		    get_info.offset;
	}

	/* pkt_cnt descriptors were freed; the helper decides whether to
	 * wake the queue(s) (nic->config.multiq selects multiqueue mode). */
	s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);

	spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
}
3138
3139 /**
3140  *  s2io_mdio_write - Function to write in to MDIO registers
3141  *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3142  *  @addr     : address value
3143  *  @value    : data value
3144  *  @dev      : pointer to net_device structure
3145  *  Description:
3146  *  This function is used to write values to the MDIO registers
3147  *  NONE
3148  */
3149 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
3150 {
3151         u64 val64 = 0x0;
3152         struct s2io_nic *sp = dev->priv;
3153         struct XENA_dev_config __iomem *bar0 = sp->bar0;
3154
3155         //address transaction
3156         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3157                         | MDIO_MMD_DEV_ADDR(mmd_type)
3158                         | MDIO_MMS_PRT_ADDR(0x0);
3159         writeq(val64, &bar0->mdio_control);
3160         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3161         writeq(val64, &bar0->mdio_control);
3162         udelay(100);
3163
3164         //Data transaction
3165         val64 = 0x0;
3166         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3167                         | MDIO_MMD_DEV_ADDR(mmd_type)
3168                         | MDIO_MMS_PRT_ADDR(0x0)
3169                         | MDIO_MDIO_DATA(value)
3170                         | MDIO_OP(MDIO_OP_WRITE_TRANS);
3171         writeq(val64, &bar0->mdio_control);
3172         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3173         writeq(val64, &bar0->mdio_control);
3174         udelay(100);
3175
3176         val64 = 0x0;
3177         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3178         | MDIO_MMD_DEV_ADDR(mmd_type)
3179         | MDIO_MMS_PRT_ADDR(0x0)
3180         | MDIO_OP(MDIO_OP_READ_TRANS);
3181         writeq(val64, &bar0->mdio_control);
3182         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3183         writeq(val64, &bar0->mdio_control);
3184         udelay(100);
3185
3186 }
3187
3188 /**
 *  s2io_mdio_read - Function to read from the MDIO registers
3190  *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3191  *  @addr     : address value
3192  *  @dev      : pointer to net_device structure
3193  *  Description:
 *  This function is used to read values from the MDIO registers
3195  *  NONE
3196  */
3197 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3198 {
3199         u64 val64 = 0x0;
3200         u64 rval64 = 0x0;
3201         struct s2io_nic *sp = dev->priv;
3202         struct XENA_dev_config __iomem *bar0 = sp->bar0;
3203
3204         /* address transaction */
3205         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3206                         | MDIO_MMD_DEV_ADDR(mmd_type)
3207                         | MDIO_MMS_PRT_ADDR(0x0);
3208         writeq(val64, &bar0->mdio_control);
3209         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3210         writeq(val64, &bar0->mdio_control);
3211         udelay(100);
3212
3213         /* Data transaction */
3214         val64 = 0x0;
3215         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3216                         | MDIO_MMD_DEV_ADDR(mmd_type)
3217                         | MDIO_MMS_PRT_ADDR(0x0)
3218                         | MDIO_OP(MDIO_OP_READ_TRANS);
3219         writeq(val64, &bar0->mdio_control);
3220         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3221         writeq(val64, &bar0->mdio_control);
3222         udelay(100);
3223
3224         /* Read the value from regs */
3225         rval64 = readq(&bar0->mdio_control);
3226         rval64 = rval64 & 0xFFFF0000;
3227         rval64 = rval64 >> 16;
3228         return rval64;
3229 }
3230 /**
3231  *  s2io_chk_xpak_counter - Function to check the status of the xpak counters
 *  @counter      : counter value to be updated
3233  *  @flag         : flag to indicate the status
3234  *  @type         : counter type
3235  *  Description:
3236  *  This function is to check the status of the xpak counters value
3237  *  NONE
3238  */
3239
3240 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
3241 {
3242         u64 mask = 0x3;
3243         u64 val64;
3244         int i;
3245         for(i = 0; i <index; i++)
3246                 mask = mask << 0x2;
3247
3248         if(flag > 0)
3249         {
3250                 *counter = *counter + 1;
3251                 val64 = *regs_stat & mask;
3252                 val64 = val64 >> (index * 0x2);
3253                 val64 = val64 + 1;
3254                 if(val64 == 3)
3255                 {
3256                         switch(type)
3257                         {
3258                         case 1:
3259                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3260                                           "service. Excessive temperatures may "
3261                                           "result in premature transceiver "
3262                                           "failure \n");
3263                         break;
3264                         case 2:
3265                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3266                                           "service Excessive bias currents may "
3267                                           "indicate imminent laser diode "
3268                                           "failure \n");
3269                         break;
3270                         case 3:
3271                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3272                                           "service Excessive laser output "
3273                                           "power may saturate far-end "
3274                                           "receiver\n");
3275                         break;
3276                         default:
3277                                 DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
3278                                           "type \n");
3279                         }
3280                         val64 = 0x0;
3281                 }
3282                 val64 = val64 << (index * 0x2);
3283                 *regs_stat = (*regs_stat & (~mask)) | (val64);
3284
3285         } else {
3286                 *regs_stat = *regs_stat & (~mask);
3287         }
3288 }
3289
3290 /**
3291  *  s2io_updt_xpak_counter - Function to update the xpak counters
3292  *  @dev         : pointer to net_device struct
3293  *  Description:
 *  This function is to update the status of the xpak counters value
3295  *  NONE
3296  */
3297 static void s2io_updt_xpak_counter(struct net_device *dev)
3298 {
3299         u16 flag  = 0x0;
3300         u16 type  = 0x0;
3301         u16 val16 = 0x0;
3302         u64 val64 = 0x0;
3303         u64 addr  = 0x0;
3304
3305         struct s2io_nic *sp = dev->priv;
3306         struct stat_block *stat_info = sp->mac_control.stats_info;
3307
3308         /* Check the communication with the MDIO slave */
3309         addr = 0x0000;
3310         val64 = 0x0;
3311         val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3312         if((val64 == 0xFFFF) || (val64 == 0x0000))
3313         {
3314                 DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
3315                           "Returned %llx\n", (unsigned long long)val64);
3316                 return;
3317         }
3318
3319         /* Check for the expecte value of 2040 at PMA address 0x0000 */
3320         if(val64 != 0x2040)
3321         {
3322                 DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
3323                 DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
3324                           (unsigned long long)val64);
3325                 return;
3326         }
3327
3328         /* Loading the DOM register to MDIO register */
3329         addr = 0xA100;
3330         s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
3331         val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3332
3333         /* Reading the Alarm flags */
3334         addr = 0xA070;
3335         val64 = 0x0;
3336         val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3337
3338         flag = CHECKBIT(val64, 0x7);
3339         type = 1;
3340         s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
3341                                 &stat_info->xpak_stat.xpak_regs_stat,
3342                                 0x0, flag, type);
3343
3344         if(CHECKBIT(val64, 0x6))
3345                 stat_info->xpak_stat.alarm_transceiver_temp_low++;
3346
3347         flag = CHECKBIT(val64, 0x3);
3348         type = 2;
3349         s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
3350                                 &stat_info->xpak_stat.xpak_regs_stat,
3351                                 0x2, flag, type);
3352
3353         if(CHECKBIT(val64, 0x2))
3354                 stat_info->xpak_stat.alarm_laser_bias_current_low++;
3355
3356         flag = CHECKBIT(val64, 0x1);
3357         type = 3;
3358         s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
3359                                 &stat_info->xpak_stat.xpak_regs_stat,
3360                                 0x4, flag, type);
3361
3362         if(CHECKBIT(val64, 0x0))
3363                 stat_info->xpak_stat.alarm_laser_output_power_low++;
3364
3365         /* Reading the Warning flags */
3366         addr = 0xA074;
3367         val64 = 0x0;
3368         val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3369
3370         if(CHECKBIT(val64, 0x7))
3371                 stat_info->xpak_stat.warn_transceiver_temp_high++;
3372
3373         if(CHECKBIT(val64, 0x6))
3374                 stat_info->xpak_stat.warn_transceiver_temp_low++;
3375
3376         if(CHECKBIT(val64, 0x3))
3377                 stat_info->xpak_stat.warn_laser_bias_current_high++;
3378
3379         if(CHECKBIT(val64, 0x2))
3380                 stat_info->xpak_stat.warn_laser_bias_current_low++;
3381
3382         if(CHECKBIT(val64, 0x1))
3383                 stat_info->xpak_stat.warn_laser_output_power_high++;
3384
3385         if(CHECKBIT(val64, 0x0))
3386                 stat_info->xpak_stat.warn_laser_output_power_low++;
3387 }
3388
3389 /**
3390  *  wait_for_cmd_complete - waits for a command to complete.
3391  *  @sp : private member of the device structure, which is a pointer to the
3392  *  s2io_nic structure.
3393  *  Description: Function that waits for a command to Write into RMAC
3394  *  ADDR DATA registers to be completed and returns either success or
3395  *  error depending on whether the command was complete or not.
3396  *  Return value:
3397  *   SUCCESS on success and FAILURE on failure.
3398  */
3399
3400 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3401                                 int bit_state)
3402 {
3403         int ret = FAILURE, cnt = 0, delay = 1;
3404         u64 val64;
3405
3406         if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3407                 return FAILURE;
3408
3409         do {
3410                 val64 = readq(addr);
3411                 if (bit_state == S2IO_BIT_RESET) {
3412                         if (!(val64 & busy_bit)) {
3413                                 ret = SUCCESS;
3414                                 break;
3415                         }
3416                 } else {
3417                         if (!(val64 & busy_bit)) {
3418                                 ret = SUCCESS;
3419                                 break;
3420                         }
3421                 }
3422
3423                 if(in_interrupt())
3424                         mdelay(delay);
3425                 else
3426                         msleep(delay);
3427
3428                 if (++cnt >= 10)
3429                         delay = 50;
3430         } while (cnt < 20);
3431         return ret;
3432 }
3433 /*
3434  * check_pci_device_id - Checks if the device id is supported
3435  * @id : device id
3436  * Description: Function to check if the pci device id is supported by driver.
3437  * Return value: Actual device id if supported else PCI_ANY_ID
3438  */
3439 static u16 check_pci_device_id(u16 id)
3440 {
3441         switch (id) {
3442         case PCI_DEVICE_ID_HERC_WIN:
3443         case PCI_DEVICE_ID_HERC_UNI:
3444                 return XFRAME_II_DEVICE;
3445         case PCI_DEVICE_ID_S2IO_UNI:
3446         case PCI_DEVICE_ID_S2IO_WIN:
3447                 return XFRAME_I_DEVICE;
3448         default:
3449                 return PCI_ANY_ID;
3450         }
3451 }
3452
3453 /**
3454  *  s2io_reset - Resets the card.
3455  *  @sp : private member of the device structure.
3456  *  Description: Function to Reset the card. This function then also
3457  *  restores the previously saved PCI configuration space registers as
3458  *  the card reset also resets the configuration space.
3459  *  Return value:
3460  *  void.
3461  */
3462
static void s2io_reset(struct s2io_nic * sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	u16 subid, pci_cmd;
	int i;
	u16 val16;
	unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
	unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;

	DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
			__FUNCTION__, sp->dev->name);

	/* Back up  the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));

	/* Issue a global software reset. */
	val64 = SW_RESET_ALL;
	writeq(val64, &bar0->sw_reset);
	/* CX4-based cards get extra settle time after the reset. */
	if (strstr(sp->product_name, "CX4")) {
		msleep(750);
	}
	msleep(250);
	/* Re-apply the saved PCI config space until the device id reads
	 * back as a supported value, i.e. the chip is alive again. */
	for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {

		/* Restore the PCI state saved during initialization. */
		pci_restore_state(sp->pdev);
		pci_read_config_word(sp->pdev, 0x2, &val16);
		if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
			break;
		msleep(200);
	}

	/* NOTE(review): val16 is only assigned inside the loop above;
	 * assumes S2IO_MAX_PCI_CONFIG_SPACE_REINIT >= 1 -- confirm. */
	if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
		DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__);
	}

	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);

	s2io_init_pci(sp);

	/* Set swapper to enable I/O register access */
	s2io_set_swapper(sp);

	/* restore mac_addr entries */
	do_s2io_restore_unicast_mc(sp);

	/* Restore the MSIX table entries from local variables */
	restore_xmsi_data(sp);

	/* Clear certain PCI/PCI-X fields after reset */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Clear "detected parity error" bit */
		pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);

		/* Clearing PCIX Ecc status register */
		pci_write_config_dword(sp->pdev, 0x68, 0x7C);

		/* Clearing PCI_STATUS error reflected here */
		writeq(s2BIT(62), &bar0->txpic_int_reg);
	}

	/* Reset device statistics maintained by OS */
	memset(&sp->stats, 0, sizeof (struct net_device_stats));

	/* Preserve a handful of software counters across the wipe of the
	 * hardware statistics block below. */
	up_cnt = sp->mac_control.stats_info->sw_stat.link_up_cnt;
	down_cnt = sp->mac_control.stats_info->sw_stat.link_down_cnt;
	up_time = sp->mac_control.stats_info->sw_stat.link_up_time;
	down_time = sp->mac_control.stats_info->sw_stat.link_down_time;
	reset_cnt = sp->mac_control.stats_info->sw_stat.soft_reset_cnt;
	mem_alloc_cnt = sp->mac_control.stats_info->sw_stat.mem_allocated;
	mem_free_cnt = sp->mac_control.stats_info->sw_stat.mem_freed;
	watchdog_cnt = sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt;
	/* save link up/down time/cnt, reset/memory/watchdog cnt */
	memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
	/* restore link up/down time/cnt, reset/memory/watchdog cnt */
	sp->mac_control.stats_info->sw_stat.link_up_cnt = up_cnt;
	sp->mac_control.stats_info->sw_stat.link_down_cnt = down_cnt;
	sp->mac_control.stats_info->sw_stat.link_up_time = up_time;
	sp->mac_control.stats_info->sw_stat.link_down_time = down_time;
	sp->mac_control.stats_info->sw_stat.soft_reset_cnt = reset_cnt;
	sp->mac_control.stats_info->sw_stat.mem_allocated = mem_alloc_cnt;
	sp->mac_control.stats_info->sw_stat.mem_freed = mem_free_cnt;
	sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt = watchdog_cnt;

	/* SXE-002: Configure link and activity LED to turn it off */
	subid = sp->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (sp->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	/*
	 * Clear spurious ECC interrupts that would have occurred on
	 * XFRAME II cards after reset.
	 */
	if (sp->device_type == XFRAME_II_DEVICE) {
		val64 = readq(&bar0->pcc_err_reg);
		writeq(val64, &bar0->pcc_err_reg);
	}

	sp->device_enabled_once = FALSE;
}
3569
3570 /**
 *  s2io_set_swapper - to set the swapper control on the card
3572  *  @sp : private member of the device structure,
3573  *  pointer to the s2io_nic structure.
3574  *  Description: Function to set the swapper control on the card
3575  *  correctly depending on the 'endianness' of the system.
3576  *  Return value:
3577  *  SUCCESS on success and FAILURE on failure.
3578  */
3579
3580 static int s2io_set_swapper(struct s2io_nic * sp)
3581 {
3582         struct net_device *dev = sp->dev;
3583         struct XENA_dev_config __iomem *bar0 = sp->bar0;
3584         u64 val64, valt, valr;
3585
3586         /*
3587          * Set proper endian settings and verify the same by reading
3588          * the PIF Feed-back register.
3589          */
3590
3591         val64 = readq(&bar0->pif_rd_swapper_fb);
3592         if (val64 != 0x0123456789ABCDEFULL) {
3593                 int i = 0;
3594                 u64 value[] = { 0xC30000C3C30000C3ULL,   /* FE=1, SE=1 */
3595                                 0x8100008181000081ULL,  /* FE=1, SE=0 */
3596                                 0x4200004242000042ULL,  /* FE=0, SE=1 */
3597                                 0};                     /* FE=0, SE=0 */
3598
3599                 while(i<4) {
3600                         writeq(value[i], &bar0->swapper_ctrl);
3601                         val64 = readq(&bar0->pif_rd_swapper_fb);
3602                         if (val64 == 0x0123456789ABCDEFULL)
3603                                 break;
3604                         i++;
3605                 }
3606                 if (i == 4) {
3607                         DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3608                                 dev->name);
3609                         DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3610                                 (unsigned long long) val64);
3611                         return FAILURE;
3612                 }
3613                 valr = value[i];
3614         } else {
3615                 valr = readq(&bar0->swapper_ctrl);
3616         }
3617
3618         valt = 0x0123456789ABCDEFULL;
3619         writeq(valt, &bar0->xmsi_address);
3620         val64 = readq(&bar0->xmsi_address);
3621
3622         if(val64 != valt) {
3623                 int i = 0;
3624                 u64 value[] = { 0x00C3C30000C3C300ULL,  /* FE=1, SE=1 */
3625                                 0x0081810000818100ULL,  /* FE=1, SE=0 */
3626                                 0x0042420000424200ULL,  /* FE=0, SE=1 */
3627                                 0};                     /* FE=0, SE=0 */
3628
3629                 while(i<4) {
3630                         writeq((value[i] | valr), &bar0->swapper_ctrl);
3631                         writeq(valt, &bar0->xmsi_address);
3632                         val64 = readq(&bar0->xmsi_address);
3633                         if(val64 == valt)
3634                                 break;
3635                         i++;
3636                 }
3637                 if(i == 4) {
3638                         unsigned long long x = val64;
3639                         DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
3640                         DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
3641                         return FAILURE;
3642                 }
3643         }
3644         val64 = readq(&bar0->swapper_ctrl);
3645         val64 &= 0xFFFF000000000000ULL;
3646
3647 #ifdef  __BIG_ENDIAN
3648         /*
3649          * The device by default set to a big endian format, so a
3650          * big endian driver need not set anything.
3651          */
3652         val64 |= (SWAPPER_CTRL_TXP_FE |
3653                  SWAPPER_CTRL_TXP_SE |
3654                  SWAPPER_CTRL_TXD_R_FE |
3655                  SWAPPER_CTRL_TXD_W_FE |
3656                  SWAPPER_CTRL_TXF_R_FE |
3657                  SWAPPER_CTRL_RXD_R_FE |
3658                  SWAPPER_CTRL_RXD_W_FE |
3659                  SWAPPER_CTRL_RXF_W_FE |
3660                  SWAPPER_CTRL_XMSI_FE |
3661                  SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3662         if (sp->config.intr_type == INTA)
3663                 val64 |= SWAPPER_CTRL_XMSI_SE;
3664         writeq(val64, &bar0->swapper_ctrl);
3665 #else
3666         /*
3667          * Initially we enable all bits to make it accessible by the
3668          * driver, then we selectively enable only those bits that
3669          * we want to set.
3670          */
3671         val64 |= (SWAPPER_CTRL_TXP_FE |
3672                  SWAPPER_CTRL_TXP_SE |
3673                  SWAPPER_CTRL_TXD_R_FE |
3674                  SWAPPER_CTRL_TXD_R_SE |
3675                  SWAPPER_CTRL_TXD_W_FE |
3676                  SWAPPER_CTRL_TXD_W_SE |
3677                  SWAPPER_CTRL_TXF_R_FE |
3678                  SWAPPER_CTRL_RXD_R_FE |
3679                  SWAPPER_CTRL_RXD_R_SE |
3680                  SWAPPER_CTRL_RXD_W_FE |
3681                  SWAPPER_CTRL_RXD_W_SE |
3682                  SWAPPER_CTRL_RXF_W_FE |
3683                  SWAPPER_CTRL_XMSI_FE |
3684                  SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3685         if (sp->config.intr_type == INTA)
3686                 val64 |= SWAPPER_CTRL_XMSI_SE;
3687         writeq(val64, &bar0->swapper_ctrl);
3688 #endif
3689         val64 = readq(&bar0->swapper_ctrl);
3690
3691         /*
3692          * Verifying if endian settings are accurate by reading a
3693          * feedback register.
3694          */
3695         val64 = readq(&bar0->pif_rd_swapper_fb);
3696         if (val64 != 0x0123456789ABCDEFULL) {
3697                 /* Endian settings are incorrect, calls for another dekko. */
3698                 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3699                           dev->name);
3700                 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3701                           (unsigned long long) val64);
3702                 return FAILURE;
3703         }
3704
3705         return SUCCESS;
3706 }
3707
3708 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3709 {
3710         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3711         u64 val64;
3712         int ret = 0, cnt = 0;
3713
3714         do {
3715                 val64 = readq(&bar0->xmsi_access);
3716                 if (!(val64 & s2BIT(15)))
3717                         break;
3718                 mdelay(1);
3719                 cnt++;
3720         } while(cnt < 5);
3721         if (cnt == 5) {
3722                 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3723                 ret = 1;
3724         }
3725
3726         return ret;
3727 }
3728
3729 static void restore_xmsi_data(struct s2io_nic *nic)
3730 {
3731         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3732         u64 val64;
3733         int i;
3734
3735         for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3736                 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3737                 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3738                 val64 = (s2BIT(7) | s2BIT(15) | vBIT(i, 26, 6));
3739                 writeq(val64, &bar0->xmsi_access);
3740                 if (wait_for_msix_trans(nic, i)) {
3741                         DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3742                         continue;
3743                 }
3744         }
3745 }
3746
3747 static void store_xmsi_data(struct s2io_nic *nic)
3748 {
3749         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3750         u64 val64, addr, data;
3751         int i;
3752
3753         /* Store and display */
3754         for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3755                 val64 = (s2BIT(15) | vBIT(i, 26, 6));
3756                 writeq(val64, &bar0->xmsi_access);
3757                 if (wait_for_msix_trans(nic, i)) {
3758                         DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3759                         continue;
3760                 }
3761                 addr = readq(&bar0->xmsi_address);
3762                 data = readq(&bar0->xmsi_data);
3763                 if (addr && data) {
3764                         nic->msix_info[i].addr = addr;
3765                         nic->msix_info[i].data = data;
3766                 }
3767         }
3768 }
3769
3770 static int s2io_enable_msi_x(struct s2io_nic *nic)
3771 {
3772         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3773         u64 tx_mat, rx_mat;
3774         u16 msi_control; /* Temp variable */
3775         int ret, i, j, msix_indx = 1;
3776
3777         nic->entries = kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct msix_entry),
3778                                GFP_KERNEL);
3779         if (!nic->entries) {
3780                 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \
3781                         __FUNCTION__);
3782                 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3783                 return -ENOMEM;
3784         }
3785         nic->mac_control.stats_info->sw_stat.mem_allocated
3786                 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3787
3788         nic->s2io_entries =
3789                 kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct s2io_msix_entry),
3790                                    GFP_KERNEL);
3791         if (!nic->s2io_entries) {
3792                 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3793                         __FUNCTION__);
3794                 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3795                 kfree(nic->entries);
3796                 nic->mac_control.stats_info->sw_stat.mem_freed
3797                         += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3798                 return -ENOMEM;
3799         }
3800          nic->mac_control.stats_info->sw_stat.mem_allocated
3801                 += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3802
3803         for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
3804                 nic->entries[i].entry = i;
3805                 nic->s2io_entries[i].entry = i;
3806                 nic->s2io_entries[i].arg = NULL;
3807                 nic->s2io_entries[i].in_use = 0;
3808         }
3809
3810         tx_mat = readq(&bar0->tx_mat0_n[0]);
3811         for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) {
3812                 tx_mat |= TX_MAT_SET(i, msix_indx);
3813                 nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
3814                 nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
3815                 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3816         }
3817         writeq(tx_mat, &bar0->tx_mat0_n[0]);
3818
3819         rx_mat = readq(&bar0->rx_mat);
3820         for (j = 0; j < nic->config.rx_ring_num; j++, msix_indx++) {
3821                 rx_mat |= RX_MAT_SET(j, msix_indx);
3822                 nic->s2io_entries[msix_indx].arg
3823                         = &nic->mac_control.rings[j];
3824                 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3825                 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3826         }
3827         writeq(rx_mat, &bar0->rx_mat);
3828
3829         nic->avail_msix_vectors = 0;
3830         ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
3831         /* We fail init if error or we get less vectors than min required */
3832         if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) {
3833                 nic->avail_msix_vectors = ret;
3834                 ret = pci_enable_msix(nic->pdev, nic->entries, ret);
3835         }
3836         if (ret) {
3837                 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
3838                 kfree(nic->entries);
3839                 nic->mac_control.stats_info->sw_stat.mem_freed
3840                         += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3841                 kfree(nic->s2io_entries);
3842                 nic->mac_control.stats_info->sw_stat.mem_freed
3843                 += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3844                 nic->entries = NULL;
3845                 nic->s2io_entries = NULL;
3846                 nic->avail_msix_vectors = 0;
3847                 return -ENOMEM;
3848         }
3849         if (!nic->avail_msix_vectors)
3850                 nic->avail_msix_vectors = MAX_REQUESTED_MSI_X;
3851
3852         /*
3853          * To enable MSI-X, MSI also needs to be enabled, due to a bug
3854          * in the herc NIC. (Temp change, needs to be removed later)
3855          */
3856         pci_read_config_word(nic->pdev, 0x42, &msi_control);
3857         msi_control |= 0x1; /* Enable MSI */
3858         pci_write_config_word(nic->pdev, 0x42, msi_control);
3859
3860         return 0;
3861 }
3862
3863 /* Handle software interrupt used during MSI(X) test */
3864 static irqreturn_t s2io_test_intr(int irq, void *dev_id)
3865 {
3866         struct s2io_nic *sp = dev_id;
3867
3868         sp->msi_detected = 1;
3869         wake_up(&sp->msi_wait);
3870
3871         return IRQ_HANDLED;
3872 }
3873
/* Test interrupt path by forcing a software IRQ */
3875 static int s2io_test_msi(struct s2io_nic *sp)
3876 {
3877         struct pci_dev *pdev = sp->pdev;
3878         struct XENA_dev_config __iomem *bar0 = sp->bar0;
3879         int err;
3880         u64 val64, saved64;
3881
3882         err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
3883                         sp->name, sp);
3884         if (err) {
3885                 DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
3886                        sp->dev->name, pci_name(pdev), pdev->irq);
3887                 return err;
3888         }
3889
3890         init_waitqueue_head (&sp->msi_wait);
3891         sp->msi_detected = 0;
3892
3893         saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
3894         val64 |= SCHED_INT_CTRL_ONE_SHOT;
3895         val64 |= SCHED_INT_CTRL_TIMER_EN;
3896         val64 |= SCHED_INT_CTRL_INT2MSI(1);
3897         writeq(val64, &bar0->scheduled_int_ctrl);
3898
3899         wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);
3900
3901         if (!sp->msi_detected) {
3902                 /* MSI(X) test failed, go back to INTx mode */
3903                 DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
3904                         "using MSI(X) during test\n", sp->dev->name,
3905                         pci_name(pdev));
3906
3907                 err = -EOPNOTSUPP;
3908         }
3909
3910         free_irq(sp->entries[1].vector, sp);
3911
3912         writeq(saved64, &bar0->scheduled_int_ctrl);
3913
3914         return err;
3915 }
3916
3917 static void remove_msix_isr(struct s2io_nic *sp)
3918 {
3919         int i;
3920         u16 msi_control;
3921
3922         for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3923                 if (sp->s2io_entries[i].in_use ==
3924                         MSIX_REGISTERED_SUCCESS) {
3925                         int vector = sp->entries[i].vector;
3926                         void *arg = sp->s2io_entries[i].arg;
3927                         free_irq(vector, arg);
3928                 }
3929         }
3930
3931         kfree(sp->entries);
3932         kfree(sp->s2io_entries);
3933         sp->entries = NULL;
3934         sp->s2io_entries = NULL;
3935
3936         pci_read_config_word(sp->pdev, 0x42, &msi_control);
3937         msi_control &= 0xFFFE; /* Disable MSI */
3938         pci_write_config_word(sp->pdev, 0x42, msi_control);
3939
3940         pci_disable_msix(sp->pdev);
3941 }
3942
3943 static void remove_inta_isr(struct s2io_nic *sp)
3944 {
3945         struct net_device *dev = sp->dev;
3946
3947         free_irq(sp->pdev->irq, dev);
3948 }
3949
3950 /* ********************************************************* *
3951  * Functions defined below concern the OS part of the driver *
3952  * ********************************************************* */
3953
3954 /**
3955  *  s2io_open - open entry point of the driver
3956  *  @dev : pointer to the device structure.
3957  *  Description:
3958  *  This function is the open entry point of the driver. It mainly calls a
3959  *  function to allocate Rx buffers and inserts them into the buffer
3960  *  descriptors and then enables the Rx part of the NIC.
3961  *  Return value:
3962  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3963  *   file on failure.
3964  */
3965
3966 static int s2io_open(struct net_device *dev)
3967 {
3968         struct s2io_nic *sp = dev->priv;
3969         int err = 0;
3970
3971         /*
3972          * Make sure you have link off by default every time
3973          * Nic is initialized
3974          */
3975         netif_carrier_off(dev);
3976         sp->last_link_state = 0;
3977
3978         if (sp->config.intr_type == MSI_X) {
3979                 int ret = s2io_enable_msi_x(sp);
3980
3981                 if (!ret) {
3982                         ret = s2io_test_msi(sp);
3983                         /* rollback MSI-X, will re-enable during add_isr() */
3984                         remove_msix_isr(sp);
3985                 }
3986                 if (ret) {
3987
3988                         DBG_PRINT(ERR_DBG,
3989                           "%s: MSI-X requested but failed to enable\n",
3990                           dev->name);
3991                         sp->config.intr_type = INTA;
3992                 }
3993         }
3994
3995         /* NAPI doesn't work well with MSI(X) */
3996          if (sp->config.intr_type != INTA) {
3997                 if(sp->config.napi)
3998                         sp->config.napi = 0;
3999         }
4000
4001         /* Initialize H/W and enable interrupts */
4002         err = s2io_card_up(sp);
4003         if (err) {
4004                 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
4005                           dev->name);
4006                 goto hw_init_failed;
4007         }
4008
4009         if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
4010                 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
4011                 s2io_card_down(sp);
4012                 err = -ENODEV;
4013                 goto hw_init_failed;
4014         }
4015         s2io_start_all_tx_queue(sp);
4016         return 0;
4017
4018 hw_init_failed:
4019         if (sp->config.intr_type == MSI_X) {
4020                 if (sp->entries) {
4021                         kfree(sp->entries);
4022                         sp->mac_control.stats_info->sw_stat.mem_freed
4023                         += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
4024                 }
4025                 if (sp->s2io_entries) {
4026                         kfree(sp->s2io_entries);
4027                         sp->mac_control.stats_info->sw_stat.mem_freed
4028                         += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
4029                 }
4030         }
4031         return err;
4032 }
4033
4034 /**
4035  *  s2io_close -close entry point of the driver
4036  *  @dev : device pointer.
4037  *  Description:
4038  *  This is the stop entry point of the driver. It needs to undo exactly
 *  whatever was done by the open entry point, thus it's usually referred to
 *  as the close function. Among other things this function mainly stops the
4041  *  Rx side of the NIC and frees all the Rx buffers in the Rx rings.
4042  *  Return value:
4043  *  0 on success and an appropriate (-)ve integer as defined in errno.h
4044  *  file on failure.
4045  */
4046
4047 static int s2io_close(struct net_device *dev)
4048 {
4049         struct s2io_nic *sp = dev->priv;
4050         struct config_param *config = &sp->config;
4051         u64 tmp64;
4052         int offset;
4053
4054         /* Return if the device is already closed               *
4055         *  Can happen when s2io_card_up failed in change_mtu    *
4056         */
4057         if (!is_s2io_card_up(sp))
4058                 return 0;
4059
4060         s2io_stop_all_tx_queue(sp);
4061         /* delete all populated mac entries */
4062         for (offset = 1; offset < config->max_mc_addr; offset++) {
4063                 tmp64 = do_s2io_read_unicast_mc(sp, offset);
4064                 if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
4065                         do_s2io_delete_unicast_mc(sp, tmp64);
4066         }
4067
4068         s2io_card_down(sp);
4069
4070         return 0;
4071 }
4072
4073 /**
 *  s2io_xmit - Tx entry point of the driver
4075  *  @skb : the socket buffer containing the Tx data.
4076  *  @dev : device pointer.
4077  *  Description :
4078  *  This function is the Tx entry point of the driver. S2IO NIC supports
4079  *  certain protocol assist features on Tx side, namely  CSO, S/G, LSO.
 *  NOTE: when the device can't queue the pkt, just the trans_start variable
 *  will not be updated.
4082  *  Return value:
4083  *  0 on success & 1 on failure.
4084  */
4085
static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct s2io_nic *sp = dev->priv;
	u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
	register u64 val64;
	struct TxD *txdp;
	struct TxFIFO_element __iomem *tx_fifo;
	unsigned long flags = 0;
	u16 vlan_tag = 0;
	struct fifo_info *fifo = NULL;
	struct mac_info *mac_control;
	struct config_param *config;
	int do_spin_lock = 1;
	int offload_type;
	int enable_per_list_interrupt = 0;
	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;

	mac_control = &sp->mac_control;
	config = &sp->config;

	DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);

	/* Drop zero-length skbs and anything queued while resetting. */
	if (unlikely(skb->len <= 0)) {
		DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
		dev_kfree_skb_any(skb);
		return 0;
	}

	if (!is_s2io_card_up(sp)) {
		DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
			  dev->name);
		dev_kfree_skb(skb);
		return 0;
	}

	/*
	 * Select the Tx FIFO.  Default steering hashes TCP/UDP port
	 * numbers into the per-protocol FIFO groups; priority steering
	 * maps skb->priority through config->fifo_mapping.  UDP traffic
	 * additionally uses trylock (do_spin_lock = 0) and, for large
	 * packets, per-list interrupts.
	 */
	queue = 0;
	if (sp->vlgrp && vlan_tx_tag_present(skb))
		vlan_tag = vlan_tx_tag_get(skb);
	if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *ip;
			struct tcphdr *th;
			ip = ip_hdr(skb);

			/* Only first fragments carry an L4 header to hash. */
			if ((ip->frag_off & htons(IP_OFFSET|IP_MF)) == 0) {
				th = (struct tcphdr *)(((unsigned char *)ip) +
						ip->ihl*4);

				if (ip->protocol == IPPROTO_TCP) {
					queue_len = sp->total_tcp_fifos;
					queue = (ntohs(th->source) +
							ntohs(th->dest)) &
					    sp->fifo_selector[queue_len - 1];
					if (queue >= queue_len)
						queue = queue_len - 1;
				} else if (ip->protocol == IPPROTO_UDP) {
					queue_len = sp->total_udp_fifos;
					queue = (ntohs(th->source) +
							ntohs(th->dest)) &
					    sp->fifo_selector[queue_len - 1];
					if (queue >= queue_len)
						queue = queue_len - 1;
					queue += sp->udp_fifo_idx;
					if (skb->len > 1024)
						enable_per_list_interrupt = 1;
					do_spin_lock = 0;
				}
			}
		}
	} else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
		/* get fifo number based on skb->priority value */
		queue = config->fifo_mapping
					[skb->priority & (MAX_TX_FIFOS - 1)];
	fifo = &mac_control->fifos[queue];

	if (do_spin_lock)
		spin_lock_irqsave(&fifo->tx_lock, flags);
	else {
		if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
			return NETDEV_TX_LOCKED;
	}

	/* Back off if this queue (or the whole device) is stopped. */
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	if (sp->config.multiq) {
		if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
			spin_unlock_irqrestore(&fifo->tx_lock, flags);
			return NETDEV_TX_BUSY;
		}
	} else
#endif
	if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
		if (netif_queue_stopped(dev)) {
			spin_unlock_irqrestore(&fifo->tx_lock, flags);
			return NETDEV_TX_BUSY;
		}
	}

	put_off = (u16) fifo->tx_curr_put_info.offset;
	get_off = (u16) fifo->tx_curr_get_info.offset;
	txdp = (struct TxD *) fifo->list_info[put_off].list_virt_addr;

	queue_len = fifo->tx_curr_put_info.fifo_len + 1;
	/* Avoid "put" pointer going beyond "get" pointer */
	if (txdp->Host_Control ||
		   ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
		s2io_stop_tx_queue(sp, fifo->fifo_no);
		dev_kfree_skb(skb);
		spin_unlock_irqrestore(&fifo->tx_lock, flags);
		return 0;
	}

	/* Fill the first TxD: offload flags, checksum, VLAN, ownership. */
	offload_type = s2io_offload_type(skb);
	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
		txdp->Control_1 |= TXD_TCP_LSO_EN;
		txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
	}
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txdp->Control_2 |=
		    (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
		     TXD_TX_CKO_UDP_EN);
	}
	txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
	txdp->Control_1 |= TXD_LIST_OWN_XENA;
	txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
	if (enable_per_list_interrupt)
		if (put_off & (queue_len >> 5))
			txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
	if (vlan_tag) {
		txdp->Control_2 |= TXD_VLAN_ENABLE;
		txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
	}

	/* UFO needs an extra in-band header descriptor before the data. */
	frg_len = skb->len - skb->data_len;
	if (offload_type == SKB_GSO_UDP) {
		int ufo_size;

		ufo_size = s2io_udp_mss(skb);
		ufo_size &= ~7;
		txdp->Control_1 |= TXD_UFO_EN;
		txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
		txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
#ifdef __BIG_ENDIAN
		/* both variants do cpu_to_be64(be32_to_cpu(...)) */
		fifo->ufo_in_band_v[put_off] =
				(__force u64)skb_shinfo(skb)->ip6_frag_id;
#else
		fifo->ufo_in_band_v[put_off] =
				(__force u64)skb_shinfo(skb)->ip6_frag_id << 32;
#endif
		txdp->Host_Control = (unsigned long)fifo->ufo_in_band_v;
		txdp->Buffer_Pointer = pci_map_single(sp->pdev,
					fifo->ufo_in_band_v,
					sizeof(u64), PCI_DMA_TODEVICE);
		if((txdp->Buffer_Pointer == 0) ||
			(txdp->Buffer_Pointer == DMA_ERROR_CODE))
			goto pci_map_failed;
		txdp++;
	}

	/* Map the linear part of the skb. */
	txdp->Buffer_Pointer = pci_map_single
	    (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
	if((txdp->Buffer_Pointer == 0) ||
		(txdp->Buffer_Pointer == DMA_ERROR_CODE))
		goto pci_map_failed;

	txdp->Host_Control = (unsigned long) skb;
	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
	if (offload_type == SKB_GSO_UDP)
		txdp->Control_1 |= TXD_UFO_EN;

	frg_cnt = skb_shinfo(skb)->nr_frags;
	/* For fragmented SKB. */
	for (i = 0; i < frg_cnt; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		/* A '0' length fragment will be ignored */
		if (!frag->size)
			continue;
		txdp++;
		txdp->Buffer_Pointer = (u64) pci_map_page
		    (sp->pdev, frag->page, frag->page_offset,
		     frag->size, PCI_DMA_TODEVICE);
		txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
		if (offload_type == SKB_GSO_UDP)
			txdp->Control_1 |= TXD_UFO_EN;
	}
	txdp->Control_1 |= TXD_GATHER_CODE_LAST;

	if (offload_type == SKB_GSO_UDP)
		frg_cnt++; /* as Txd0 was used for inband header */

	/* Hand the descriptor list to the hardware FIFO. */
	tx_fifo = mac_control->tx_FIFO_start[queue];
	val64 = fifo->list_info[put_off].list_phy_addr;
	writeq(val64, &tx_fifo->TxDL_Pointer);

	val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
		 TX_FIFO_LAST_LIST);
	if (offload_type)
		val64 |= TX_FIFO_SPECIAL_FUNC;

	writeq(val64, &tx_fifo->List_Control);

	/* Ensure the doorbell writes are posted before releasing the lock. */
	mmiowb();

	put_off++;
	if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
		put_off = 0;
	fifo->tx_curr_put_info.offset = put_off;

	/* Avoid "put" pointer going beyond "get" pointer */
	if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
		DBG_PRINT(TX_DBG,
			  "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
			  put_off, get_off);
		s2io_stop_tx_queue(sp, fifo->fifo_no);
	}
	mac_control->stats_info->sw_stat.mem_allocated += skb->truesize;
	dev->trans_start = jiffies;
	spin_unlock_irqrestore(&fifo->tx_lock, flags);

	/* Under MSI-X, reap completed Tx descriptors inline. */
	if (sp->config.intr_type == MSI_X)
		tx_intr_handler(fifo);

	return 0;
pci_map_failed:
	/* DMA mapping failed: count it, stop the queue, drop the skb. */
	stats->pci_map_fail_cnt++;
	s2io_stop_tx_queue(sp, fifo->fifo_no);
	stats->mem_freed += skb->truesize;
	dev_kfree_skb(skb);
	spin_unlock_irqrestore(&fifo->tx_lock, flags);
	return 0;
}
4319
4320 static void
4321 s2io_alarm_handle(unsigned long data)
4322 {
4323         struct s2io_nic *sp = (struct s2io_nic *)data;
4324         struct net_device *dev = sp->dev;
4325
4326         s2io_handle_errors(dev);
4327         mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4328 }
4329
4330 static int s2io_chk_rx_buffers(struct ring_info *ring)
4331 {
4332         if (fill_rx_buffers(ring) == -ENOMEM) {
4333                 DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name);
4334                 DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
4335         }
4336         return 0;
4337 }
4338
4339 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4340 {
4341         struct ring_info *ring = (struct ring_info *)dev_id;
4342         struct s2io_nic *sp = ring->nic;
4343
4344         if (!is_s2io_card_up(sp))
4345                 return IRQ_HANDLED;
4346
4347         rx_intr_handler(ring);
4348         s2io_chk_rx_buffers(ring);
4349
4350         return IRQ_HANDLED;
4351 }
4352
4353 static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4354 {
4355         struct fifo_info *fifo = (struct fifo_info *)dev_id;
4356         struct s2io_nic *sp = fifo->nic;
4357
4358         if (!is_s2io_card_up(sp))
4359                 return IRQ_HANDLED;
4360
4361         tx_intr_handler(fifo);
4362         return IRQ_HANDLED;
4363 }
/*
 * Handle TxPIC (GPIO) interrupts: evaluate link up/down transitions,
 * enable the adapter and LED on link-up, and flip the GPIO interrupt
 * masks so only the opposite transition is unmasked afterwards.
 */
static void s2io_txpic_intr_handle(struct s2io_nic *sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;

	val64 = readq(&bar0->pic_int_status);
	if (val64 & PIC_INT_GPIO) {
		val64 = readq(&bar0->gpio_int_reg);
		if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
		    (val64 & GPIO_INT_REG_LINK_UP)) {
			/*
			 * This is unstable state so clear both up/down
			 * interrupt and adapter to re-evaluate the link state.
			 */
			val64 |=  GPIO_INT_REG_LINK_DOWN;
			val64 |= GPIO_INT_REG_LINK_UP;
			writeq(val64, &bar0->gpio_int_reg);
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~(GPIO_INT_MASK_LINK_UP |
				   GPIO_INT_MASK_LINK_DOWN);
			writeq(val64, &bar0->gpio_int_mask);
		}
		else if (val64 & GPIO_INT_REG_LINK_UP) {
			val64 = readq(&bar0->adapter_status);
				/* Enable Adapter */
			val64 = readq(&bar0->adapter_control);
			val64 |= ADAPTER_CNTL_EN;
			writeq(val64, &bar0->adapter_control);
			val64 |= ADAPTER_LED_ON;
			writeq(val64, &bar0->adapter_control);
			if (!sp->device_enabled_once)
				sp->device_enabled_once = 1;

			s2io_link(sp, LINK_UP);
			/*
			 * unmask link down interrupt and mask link-up
			 * intr
			 */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_DOWN;
			val64 |= GPIO_INT_MASK_LINK_UP;
			writeq(val64, &bar0->gpio_int_mask);

		}else if (val64 & GPIO_INT_REG_LINK_DOWN) {
			val64 = readq(&bar0->adapter_status);
			s2io_link(sp, LINK_DOWN);
			/* Link is down so unmask link-up interrupt */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_UP;
			val64 |= GPIO_INT_MASK_LINK_DOWN;
			writeq(val64, &bar0->gpio_int_mask);

			/* turn off LED */
			val64 = readq(&bar0->adapter_control);
			val64 = val64 &(~ADAPTER_LED_ON);
			writeq(val64, &bar0->adapter_control);
		}
	}
	/* Read-back flushes the mask writes above. */
	val64 = readq(&bar0->gpio_int_mask);
}
4424
4425 /**
4426  *  do_s2io_chk_alarm_bit - Check for alarm and incrment the counter
4427  *  @value: alarm bits
4428  *  @addr: address value
4429  *  @cnt: counter variable
4430  *  Description: Check for alarm and increment the counter
4431  *  Return Value:
4432  *  1 - if alarm bit set
4433  *  0 - if alarm bit is not set
4434  */
4435 static int do_s2io_chk_alarm_bit(u64 value, void __iomem * addr,
4436                           unsigned long long *cnt)
4437 {
4438         u64 val64;
4439         val64 = readq(addr);
4440         if ( val64 & value ) {
4441                 writeq(val64, addr);
4442                 (*cnt)++;
4443                 return 1;
4444         }
4445         return 0;
4446
4447 }
4448
/**
 *  s2io_handle_errors - Xframe error indication handler
 *  @nic: device private variable
 *  Description: Handle alarms such as loss of link, single or
 *  double ECC errors, critical and serious errors.
 *  Return Value:
 *  NONE
 */
static void s2io_handle_errors(void * dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct s2io_nic *sp = dev->priv;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 temp64 = 0,val64=0;
	int i = 0;

	struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
	struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;

	/* Nothing to do if the card is down... */
	if (!is_s2io_card_up(sp))
		return;

	/* ...or the PCI channel has gone offline */
	if (pci_channel_offline(sp->pdev))
		return;

	memset(&sw_stat->ring_full_cnt, 0,
		sizeof(sw_stat->ring_full_cnt));

	/* Handling the XPAK counters update */
	if(stats->xpak_timer_count < 72000) {
		/* waiting for an hour */
		stats->xpak_timer_count++;
	} else {
		s2io_updt_xpak_counter(dev);
		/* reset the count to zero */
		stats->xpak_timer_count = 0;
	}

	/* Handling link status change error Intr */
	if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
		val64 = readq(&bar0->mac_rmac_err_reg);
		/* write-to-clear the latched cause bits */
		writeq(val64, &bar0->mac_rmac_err_reg);
		if (val64 & RMAC_LINK_STATE_CHANGE_INT)
			schedule_work(&sp->set_link_task);
	}

	/* In case of a serious error, the device will be Reset. */
	if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
				&sw_stat->serious_err_cnt))
		goto reset;

	/* Check for data parity error */
	if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
				&sw_stat->parity_err_cnt))
		goto reset;

	/* Check for ring full counter */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* each counter register packs four 16-bit ring counters */
		val64 = readq(&bar0->ring_bump_counter1);
		for (i=0; i<4; i++) {
			temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
			temp64 >>= 64 - ((i+1)*16);
			sw_stat->ring_full_cnt[i] += temp64;
		}

		val64 = readq(&bar0->ring_bump_counter2);
		for (i=0; i<4; i++) {
			temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
			temp64 >>= 64 - ((i+1)*16);
			 sw_stat->ring_full_cnt[i+4] += temp64;
		}
	}

	val64 = readq(&bar0->txdma_int_status);
	/*check for pfc_err*/
	if (val64 & TXDMA_PFC_INT) {
		if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM|
				PFC_MISC_0_ERR | PFC_MISC_1_ERR|
				PFC_PCIX_ERR, &bar0->pfc_err_reg,
				&sw_stat->pfc_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR, &bar0->pfc_err_reg,
				&sw_stat->pfc_err_cnt);
	}

	/*check for tda_err*/
	if (val64 & TXDMA_TDA_INT) {
		if(do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
				TDA_SM1_ERR_ALARM, &bar0->tda_err_reg,
				&sw_stat->tda_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
				&bar0->tda_err_reg, &sw_stat->tda_err_cnt);
	}
	/*check for pcc_err*/
	if (val64 & TXDMA_PCC_INT) {
		if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM
				| PCC_N_SERR | PCC_6_COF_OV_ERR
				| PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR
				| PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR
				| PCC_TXB_ECC_DB_ERR, &bar0->pcc_err_reg,
				&sw_stat->pcc_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
				&bar0->pcc_err_reg, &sw_stat->pcc_err_cnt);
	}

	/*check for tti_err*/
	if (val64 & TXDMA_TTI_INT) {
		if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM, &bar0->tti_err_reg,
				&sw_stat->tti_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
				&bar0->tti_err_reg, &sw_stat->tti_err_cnt);
	}

	/*check for lso_err*/
	if (val64 & TXDMA_LSO_INT) {
		if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT
				| LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
				&bar0->lso_err_reg, &sw_stat->lso_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
				&bar0->lso_err_reg, &sw_stat->lso_err_cnt);
	}

	/*check for tpa_err*/
	if (val64 & TXDMA_TPA_INT) {
		if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM, &bar0->tpa_err_reg,
			&sw_stat->tpa_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP, &bar0->tpa_err_reg,
			&sw_stat->tpa_err_cnt);
	}

	/*check for sm_err*/
	if (val64 & TXDMA_SM_INT) {
		if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM, &bar0->sm_err_reg,
			&sw_stat->sm_err_cnt))
			goto reset;
	}

	/* Tx MAC alarms */
	val64 = readq(&bar0->mac_int_status);
	if (val64 & MAC_INT_STATUS_TMAC_INT) {
		if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
				&bar0->mac_tmac_err_reg,
				&sw_stat->mac_tmac_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR
				| TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
				&bar0->mac_tmac_err_reg,
				&sw_stat->mac_tmac_err_cnt);
	}

	/* Tx XGXS alarms */
	val64 = readq(&bar0->xgxs_int_status);
	if (val64 & XGXS_INT_STATUS_TXGXS) {
		if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
				&bar0->xgxs_txgxs_err_reg,
				&sw_stat->xgxs_txgxs_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
				&bar0->xgxs_txgxs_err_reg,
				&sw_stat->xgxs_txgxs_err_cnt);
	}

	/* Rx DMA alarms */
	val64 = readq(&bar0->rxdma_int_status);
	if (val64 & RXDMA_INT_RC_INT_M) {
		if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR
				| RC_PRCn_SM_ERR_ALARM |RC_FTC_SM_ERR_ALARM,
				&bar0->rc_err_reg, &sw_stat->rc_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR
				| RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
				&sw_stat->rc_err_cnt);
		if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn
				| PRC_PCI_AB_F_WR_Rn, &bar0->prc_pcix_err_reg,
				&sw_stat->prc_pcix_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn | PRC_PCI_DP_WR_Rn
				| PRC_PCI_DP_F_WR_Rn, &bar0->prc_pcix_err_reg,
				&sw_stat->prc_pcix_err_cnt);
	}

	if (val64 & RXDMA_INT_RPA_INT_M) {
		if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
				&bar0->rpa_err_reg, &sw_stat->rpa_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
				&bar0->rpa_err_reg, &sw_stat->rpa_err_cnt);
	}

	if (val64 & RXDMA_INT_RDA_INT_M) {
		if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR
				| RDA_FRM_ECC_DB_N_AERR | RDA_SM1_ERR_ALARM
				| RDA_SM0_ERR_ALARM | RDA_RXD_ECC_DB_SERR,
				&bar0->rda_err_reg, &sw_stat->rda_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR | RDA_FRM_ECC_SG_ERR
				| RDA_MISC_ERR | RDA_PCIX_ERR,
				&bar0->rda_err_reg, &sw_stat->rda_err_cnt);
	}

	if (val64 & RXDMA_INT_RTI_INT_M) {
		if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM, &bar0->rti_err_reg,
				&sw_stat->rti_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
				&bar0->rti_err_reg, &sw_stat->rti_err_cnt);
	}

	/* Rx MAC alarms */
	val64 = readq(&bar0->mac_int_status);
	if (val64 & MAC_INT_STATUS_RMAC_INT) {
		if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
				&bar0->mac_rmac_err_reg,
				&sw_stat->mac_rmac_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(RMAC_UNUSED_INT|RMAC_SINGLE_ECC_ERR|
				RMAC_DOUBLE_ECC_ERR, &bar0->mac_rmac_err_reg,
				&sw_stat->mac_rmac_err_cnt);
	}

	/* Rx XGXS alarms */
	val64 = readq(&bar0->xgxs_int_status);
	if (val64 & XGXS_INT_STATUS_RXGXS) {
		if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
				&bar0->xgxs_rxgxs_err_reg,
				&sw_stat->xgxs_rxgxs_err_cnt))
			goto reset;
	}

	/* Memory controller alarms */
	val64 = readq(&bar0->mc_int_status);
	if(val64 & MC_INT_STATUS_MC_INT) {
		if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR, &bar0->mc_err_reg,
				&sw_stat->mc_err_cnt))
			goto reset;

		/* Handling Ecc errors */
		if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
			writeq(val64, &bar0->mc_err_reg);
			if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
				sw_stat->double_ecc_errs++;
				if (sp->device_type != XFRAME_II_DEVICE) {
					/*
					 * Reset XframeI only if critical error
					 */
					if (val64 &
						(MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
						MC_ERR_REG_MIRI_ECC_DB_ERR_1))
								goto reset;
					}
			} else
				sw_stat->single_ecc_errs++;
		}
	}
	return;

reset:
	/* A fatal alarm was latched: stop Tx and schedule a soft reset */
	s2io_stop_all_tx_queue(sp);
	schedule_work(&sp->rst_timer_task);
	sw_stat->soft_reset_cnt++;
	return;
}
4710
/**
 *  s2io_isr - ISR handler of the device .
 *  @irq: the irq of the device.
 *  @dev_id: a void pointer to the dev structure of the NIC.
 *  Description:  This function is the ISR handler of the device. It
 *  identifies the reason for the interrupt and calls the relevant
 *  service routines. As a contingency measure, this ISR allocates the
 *  recv buffers, if their numbers are below the panic value which is
 *  presently set to 25% of the original number of rcv buffers allocated.
 *  Return value:
 *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
 *   IRQ_NONE: will be returned if interrupt is not from our device
 */
static irqreturn_t s2io_isr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct s2io_nic *sp = dev->priv;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	int i;
	u64 reason = 0;
	struct mac_info *mac_control;
	struct config_param *config;

	/* Pretend we handled any irq's from a disconnected card */
	if (pci_channel_offline(sp->pdev))
		return IRQ_NONE;

	if (!is_s2io_card_up(sp))
		return IRQ_NONE;

	mac_control = &sp->mac_control;
	config = &sp->config;

	/*
	 * Identify the cause for interrupt and call the appropriate
	 * interrupt handler. Causes for the interrupt could be;
	 * 1. Rx of packet.
	 * 2. Tx complete.
	 * 3. Link down.
	 */
	reason = readq(&bar0->general_int_status);

	/* all-ones read indicates the device is no longer responding */
	if (unlikely(reason == S2IO_MINUS_ONE) ) {
		/* Nothing much can be done. Get out */
		return IRQ_HANDLED;
	}

	if (reason & (GEN_INTR_RXTRAFFIC |
		GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC))
	{
		/* mask all interrupts while we service this one */
		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);

		if (config->napi) {
			if (reason & GEN_INTR_RXTRAFFIC) {
				/* hand Rx to NAPI; Rx stays masked until
				 * the poll routine finishes */
				if (likely(netif_rx_schedule_prep(dev,
							&sp->napi))) {
					__netif_rx_schedule(dev, &sp->napi);
					writeq(S2IO_MINUS_ONE,
					       &bar0->rx_traffic_mask);
				} else
					writeq(S2IO_MINUS_ONE,
					       &bar0->rx_traffic_int);
			}
		} else {
			/*
			 * rx_traffic_int reg is an R1 register, writing all 1's
			 * will ensure that the actual interrupt causing bit
			 * get's cleared and hence a read can be avoided.
			 */
			if (reason & GEN_INTR_RXTRAFFIC)
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);

			for (i = 0; i < config->rx_ring_num; i++)
				rx_intr_handler(&mac_control->rings[i]);
		}

		/*
		 * tx_traffic_int reg is an R1 register, writing all 1's
		 * will ensure that the actual interrupt causing bit get's
		 * cleared and hence a read can be avoided.
		 */
		if (reason & GEN_INTR_TXTRAFFIC)
			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);

		for (i = 0; i < config->tx_fifo_num; i++)
			tx_intr_handler(&mac_control->fifos[i]);

		if (reason & GEN_INTR_TXPIC)
			s2io_txpic_intr_handle(sp);

		/*
		 * Reallocate the buffers from the interrupt handler itself.
		 */
		if (!config->napi) {
			for (i = 0; i < config->rx_ring_num; i++)
				s2io_chk_rx_buffers(&mac_control->rings[i]);
		}
		/* restore the saved mask; the readl presumably flushes the
		 * posted write — NOTE(review): confirm */
		writeq(sp->general_int_mask, &bar0->general_int_mask);
		readl(&bar0->general_int_status);

		return IRQ_HANDLED;

	}
	else if (!reason) {
		/* The interrupt was not raised by us */
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}
4821
4822 /**
4823  * s2io_updt_stats -
4824  */
4825 static void s2io_updt_stats(struct s2io_nic *sp)
4826 {
4827         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4828         u64 val64;
4829         int cnt = 0;
4830
4831         if (is_s2io_card_up(sp)) {
4832                 /* Apprx 30us on a 133 MHz bus */
4833                 val64 = SET_UPDT_CLICKS(10) |
4834                         STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4835                 writeq(val64, &bar0->stat_cfg);
4836                 do {
4837                         udelay(100);
4838                         val64 = readq(&bar0->stat_cfg);
4839                         if (!(val64 & s2BIT(0)))
4840                                 break;
4841                         cnt++;
4842                         if (cnt == 5)
4843                                 break; /* Updt failed */
4844                 } while(1);
4845         }
4846 }
4847
4848 /**
4849  *  s2io_get_stats - Updates the device statistics structure.
4850  *  @dev : pointer to the device structure.
4851  *  Description:
4852  *  This function updates the device statistics structure in the s2io_nic
4853  *  structure and returns a pointer to the same.
4854  *  Return value:
4855  *  pointer to the updated net_device_stats structure.
4856  */
4857
4858 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4859 {
4860         struct s2io_nic *sp = dev->priv;
4861         struct mac_info *mac_control;
4862         struct config_param *config;
4863         int i;
4864
4865
4866         mac_control = &sp->mac_control;
4867         config = &sp->config;
4868
4869         /* Configure Stats for immediate updt */
4870         s2io_updt_stats(sp);
4871
4872         sp->stats.tx_packets =
4873                 le32_to_cpu(mac_control->stats_info->tmac_frms);
4874         sp->stats.tx_errors =
4875                 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4876         sp->stats.rx_errors =
4877                 le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
4878         sp->stats.multicast =
4879                 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
4880         sp->stats.rx_length_errors =
4881                 le64_to_cpu(mac_control->stats_info->rmac_long_frms);
4882
4883         /* collect per-ring rx_packets and rx_bytes */
4884         sp->stats.rx_packets = sp->stats.rx_bytes = 0;
4885         for (i = 0; i < config->rx_ring_num; i++) {
4886                 sp->stats.rx_packets += mac_control->rings[i].rx_packets;
4887                 sp->stats.rx_bytes += mac_control->rings[i].rx_bytes;
4888         }
4889
4890         return (&sp->stats);
4891 }
4892
/**
 *  s2io_set_multicast - entry point for multicast address enable/disable.
 *  @dev : pointer to the device structure
 *  Description:
 *  This function is a driver entry point which gets called by the kernel
 *  whenever multicast addresses must be enabled/disabled. This also gets
 *  called to set/reset promiscuous mode. Depending on the device flag, we
 *  determine, if multicast address must be enabled or if promiscuous mode
 *  is to be disabled etc.
 *  Return value:
 *  void.
 */

static void s2io_set_multicast(struct net_device *dev)
{
	int i, j, prev_cnt;
	struct dev_mc_list *mclist;
	struct s2io_nic *sp = dev->priv;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
	    0xfeffffffffffULL;
	u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
	void __iomem *add;
	struct config_param *config = &sp->config;

	if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
		/*  Enable all Multicast addresses: program the all-multi
		 *  match entry into the last CAM slot */
		writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
		       &bar0->rmac_addr_data0_mem);
		writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
		       &bar0->rmac_addr_data1_mem);
		val64 = RMAC_ADDR_CMD_MEM_WE |
		    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
		    RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
		writeq(val64, &bar0->rmac_addr_cmd_mem);
		/* Wait till command completes */
		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
					RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
					S2IO_BIT_RESET);

		sp->m_cast_flg = 1;
		sp->all_multi_pos = config->max_mc_addr - 1;
	} else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
		/*  Disable all Multicast addresses: overwrite the previously
		 *  programmed all-multi slot with the disable pattern */
		writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
		       &bar0->rmac_addr_data0_mem);
		writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
		       &bar0->rmac_addr_data1_mem);
		val64 = RMAC_ADDR_CMD_MEM_WE |
		    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
		    RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
		writeq(val64, &bar0->rmac_addr_cmd_mem);
		/* Wait till command completes */
		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
					RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
					S2IO_BIT_RESET);

		sp->m_cast_flg = 0;
		sp->all_multi_pos = 0;
	}

	if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
		/*  Put the NIC into promiscuous mode */
		add = &bar0->mac_cfg;
		val64 = readq(&bar0->mac_cfg);
		val64 |= MAC_CFG_RMAC_PROM_ENABLE;

		/* mac_cfg is key-protected; unlock before each 32-bit half */
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) val64, add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));

		if (vlan_tag_strip != 1) {
			val64 = readq(&bar0->rx_pa_cfg);
			val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
			writeq(val64, &bar0->rx_pa_cfg);
			vlan_strip_flag = 0;
		}

		val64 = readq(&bar0->mac_cfg);
		sp->promisc_flg = 1;
		DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
			  dev->name);
	} else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
		/*  Remove the NIC from promiscuous mode */
		add = &bar0->mac_cfg;
		val64 = readq(&bar0->mac_cfg);
		val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;

		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) val64, add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));

		if (vlan_tag_strip != 0) {
			val64 = readq(&bar0->rx_pa_cfg);
			val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
			writeq(val64, &bar0->rx_pa_cfg);
			vlan_strip_flag = 1;
		}

		val64 = readq(&bar0->mac_cfg);
		sp->promisc_flg = 0;
		DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
			  dev->name);
	}

	/*  Update individual M_CAST address list */
	if ((!sp->m_cast_flg) && dev->mc_count) {
		if (dev->mc_count >
		    (config->max_mc_addr - config->max_mac_addr)) {
			DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
				  dev->name);
			DBG_PRINT(ERR_DBG, "can be added, please enable ");
			DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
			return;
		}

		prev_cnt = sp->mc_addr_count;
		sp->mc_addr_count = dev->mc_count;

		/* Clear out the previous list of Mc in the H/W. */
		for (i = 0; i < prev_cnt; i++) {
			writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
			       &bar0->rmac_addr_data0_mem);
			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
				&bar0->rmac_addr_data1_mem);
			val64 = RMAC_ADDR_CMD_MEM_WE |
			    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
			    RMAC_ADDR_CMD_MEM_OFFSET
			    (config->mc_start_offset + i);
			writeq(val64, &bar0->rmac_addr_cmd_mem);

			/* Wait for command completes */
			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
					RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
					S2IO_BIT_RESET)) {
				DBG_PRINT(ERR_DBG, "%s: Adding ",
					  dev->name);
				DBG_PRINT(ERR_DBG, "Multicasts failed\n");
				return;
			}
		}

		/* Create the new Rx filter list and update the same in H/W. */
		for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
		     i++, mclist = mclist->next) {
			memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
			       ETH_ALEN);
			/* pack the 6-byte address into a big-endian u64 */
			mac_addr = 0;
			for (j = 0; j < ETH_ALEN; j++) {
				mac_addr |= mclist->dmi_addr[j];
				mac_addr <<= 8;
			}
			mac_addr >>= 8;
			writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
			       &bar0->rmac_addr_data0_mem);
			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
				&bar0->rmac_addr_data1_mem);
			val64 = RMAC_ADDR_CMD_MEM_WE |
			    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
			    RMAC_ADDR_CMD_MEM_OFFSET
			    (i + config->mc_start_offset);
			writeq(val64, &bar0->rmac_addr_cmd_mem);

			/* Wait for command completes */
			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
					RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
					S2IO_BIT_RESET)) {
				DBG_PRINT(ERR_DBG, "%s: Adding ",
					  dev->name);
				DBG_PRINT(ERR_DBG, "Multicasts failed\n");
				return;
			}
		}
	}
}
5070
5071 /* read from CAM unicast & multicast addresses and store it in
5072  * def_mac_addr structure
5073  */
5074 void do_s2io_store_unicast_mc(struct s2io_nic *sp)
5075 {
5076         int offset;
5077         u64 mac_addr = 0x0;
5078         struct config_param *config = &sp->config;
5079
5080         /* store unicast & multicast mac addresses */
5081         for (offset = 0; offset < config->max_mc_addr; offset++) {
5082                 mac_addr = do_s2io_read_unicast_mc(sp, offset);
5083                 /* if read fails disable the entry */
5084                 if (mac_addr == FAILURE)
5085                         mac_addr = S2IO_DISABLE_MAC_ENTRY;
5086                 do_s2io_copy_mac_addr(sp, offset, mac_addr);
5087         }
5088 }
5089
5090 /* restore unicast & multicast MAC to CAM from def_mac_addr structure */
5091 static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
5092 {
5093         int offset;
5094         struct config_param *config = &sp->config;
5095         /* restore unicast mac address */
5096         for (offset = 0; offset < config->max_mac_addr; offset++)
5097                 do_s2io_prog_unicast(sp->dev,
5098                         sp->def_mac_addr[offset].mac_addr);
5099
5100         /* restore multicast mac address */
5101         for (offset = config->mc_start_offset;
5102                 offset < config->max_mc_addr; offset++)
5103                 do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
5104 }
5105
5106 /* add a multicast MAC address to CAM */
5107 static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
5108 {
5109         int i;
5110         u64 mac_addr = 0;
5111         struct config_param *config = &sp->config;
5112
5113         for (i = 0; i < ETH_ALEN; i++) {
5114                 mac_addr <<= 8;
5115                 mac_addr |= addr[i];
5116         }
5117         if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
5118                 return SUCCESS;
5119
5120         /* check if the multicast mac already preset in CAM */
5121         for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
5122                 u64 tmp64;
5123                 tmp64 = do_s2io_read_unicast_mc(sp, i);
5124                 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5125                         break;
5126
5127                 if (tmp64 == mac_addr)
5128                         return SUCCESS;
5129         }
5130         if (i == config->max_mc_addr) {
5131                 DBG_PRINT(ERR_DBG,
5132                         "CAM full no space left for multicast MAC\n");
5133                 return FAILURE;
5134         }
5135         /* Update the internal structure with this new mac address */
5136         do_s2io_copy_mac_addr(sp, i, mac_addr);
5137
5138         return (do_s2io_add_mac(sp, mac_addr, i));
5139 }
5140
/*
 * do_s2io_add_mac - write a 48-bit MAC address into CAM slot @off.
 *
 * The address is staged in rmac_addr_data0_mem, then a write-enable
 * command for the given offset is issued through rmac_addr_cmd_mem and
 * polled until the strobe bit clears.
 *
 * Returns SUCCESS, or FAILURE if the command never completes.
 */
static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
{
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* Stage the address in the CAM data register. */
	writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
		&bar0->rmac_addr_data0_mem);

	/* Issue a write-enable command for CAM entry 'off'. */
	val64 =
		RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
		RMAC_ADDR_CMD_MEM_OFFSET(off);
	writeq(val64, &bar0->rmac_addr_cmd_mem);

	/* Wait till command completes */
	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
		RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
		S2IO_BIT_RESET)) {
		DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
		return FAILURE;
	}
	return SUCCESS;
}
5164 /* deletes a specified unicast/multicast mac entry from CAM */
5165 static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
5166 {
5167         int offset;
5168         u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
5169         struct config_param *config = &sp->config;
5170
5171         for (offset = 1;
5172                 offset < config->max_mc_addr; offset++) {
5173                 tmp64 = do_s2io_read_unicast_mc(sp, offset);
5174                 if (tmp64 == addr) {
5175                         /* disable the entry by writing  0xffffffffffffULL */
5176                         if (do_s2io_add_mac(sp, dis_addr, offset) ==  FAILURE)
5177                                 return FAILURE;
5178                         /* store the new mac list from CAM */
5179                         do_s2io_store_unicast_mc(sp);
5180                         return SUCCESS;
5181                 }
5182         }
5183         DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
5184                         (unsigned long long)addr);
5185         return FAILURE;
5186 }
5187
/*
 * do_s2io_read_unicast_mc - read the 48-bit MAC stored in CAM slot @offset.
 *
 * Issues a read command through rmac_addr_cmd_mem, polls for completion,
 * then fetches the result from rmac_addr_data0_mem.  The address sits in
 * the upper 48 bits of the data register, hence the final shift.
 *
 * Returns the address on success, or FAILURE on command timeout.
 * NOTE(review): FAILURE is returned through a u64; callers such as
 * do_s2io_store_unicast_mc compare against it directly.
 */
static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
{
	u64 tmp64 = 0xffffffffffff0000ULL, val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* read mac addr */
	val64 =
		RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
		RMAC_ADDR_CMD_MEM_OFFSET(offset);
	writeq(val64, &bar0->rmac_addr_cmd_mem);

	/* Wait till command completes */
	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
		RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
		S2IO_BIT_RESET)) {
		DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
		return FAILURE;
	}
	/* Address occupies the top 48 bits of the data register. */
	tmp64 = readq(&bar0->rmac_addr_data0_mem);
	return (tmp64 >> 16);
}
5210
5211 /**
5212  * s2io_set_mac_addr driver entry point
5213  */
5214
5215 static int s2io_set_mac_addr(struct net_device *dev, void *p)
5216 {
5217         struct sockaddr *addr = p;
5218
5219         if (!is_valid_ether_addr(addr->sa_data))
5220                 return -EINVAL;
5221
5222         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5223
5224         /* store the MAC address in CAM */
5225         return (do_s2io_prog_unicast(dev, dev->dev_addr));
5226 }
5227 /**
5228  *  do_s2io_prog_unicast - Programs the Xframe mac address
5229  *  @dev : pointer to the device structure.
5230  *  @addr: a uchar pointer to the new mac address which is to be set.
5231  *  Description : This procedure will program the Xframe to receive
5232  *  frames with new Mac Address
5233  *  Return value: SUCCESS on success and an appropriate (-)ve integer
5234  *  as defined in errno.h file on failure.
5235  */
5236
5237 static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
5238 {
5239         struct s2io_nic *sp = dev->priv;
5240         register u64 mac_addr = 0, perm_addr = 0;
5241         int i;
5242         u64 tmp64;
5243         struct config_param *config = &sp->config;
5244
5245         /*
5246         * Set the new MAC address as the new unicast filter and reflect this
5247         * change on the device address registered with the OS. It will be
5248         * at offset 0.
5249         */
5250         for (i = 0; i < ETH_ALEN; i++) {
5251                 mac_addr <<= 8;
5252                 mac_addr |= addr[i];
5253                 perm_addr <<= 8;
5254                 perm_addr |= sp->def_mac_addr[0].mac_addr[i];
5255         }
5256
5257         /* check if the dev_addr is different than perm_addr */
5258         if (mac_addr == perm_addr)
5259                 return SUCCESS;
5260
5261         /* check if the mac already preset in CAM */
5262         for (i = 1; i < config->max_mac_addr; i++) {
5263                 tmp64 = do_s2io_read_unicast_mc(sp, i);
5264                 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5265                         break;
5266
5267                 if (tmp64 == mac_addr) {
5268                         DBG_PRINT(INFO_DBG,
5269                         "MAC addr:0x%llx already present in CAM\n",
5270                         (unsigned long long)mac_addr);
5271                         return SUCCESS;
5272                 }
5273         }
5274         if (i == config->max_mac_addr) {
5275                 DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
5276                 return FAILURE;
5277         }
5278         /* Update the internal structure with this new mac address */
5279         do_s2io_copy_mac_addr(sp, i, mac_addr);
5280         return (do_s2io_add_mac(sp, mac_addr, i));
5281 }
5282
5283 /**
5284  * s2io_ethtool_sset - Sets different link parameters.
5285  * @sp : private member of the device structure, which is a pointer to the  * s2io_nic structure.
5286  * @info: pointer to the structure with parameters given by ethtool to set
5287  * link information.
5288  * Description:
5289  * The function sets different link parameters provided by the user onto
5290  * the NIC.
5291  * Return value:
5292  * 0 on success.
5293 */
5294
5295 static int s2io_ethtool_sset(struct net_device *dev,
5296                              struct ethtool_cmd *info)
5297 {
5298         struct s2io_nic *sp = dev->priv;
5299         if ((info->autoneg == AUTONEG_ENABLE) ||
5300             (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
5301                 return -EINVAL;
5302         else {
5303                 s2io_close(sp->dev);
5304                 s2io_open(sp->dev);
5305         }
5306
5307         return 0;
5308 }
5309
5310 /**
5311  * s2io_ethtol_gset - Return link specific information.
5312  * @sp : private member of the device structure, pointer to the
5313  *      s2io_nic structure.
5314  * @info : pointer to the structure with parameters given by ethtool
5315  * to return link information.
5316  * Description:
5317  * Returns link specific information like speed, duplex etc.. to ethtool.
5318  * Return value :
5319  * return 0 on success.
5320  */
5321
5322 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
5323 {
5324         struct s2io_nic *sp = dev->priv;
5325         info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5326         info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5327         info->port = PORT_FIBRE;
5328
5329         /* info->transceiver */
5330         info->transceiver = XCVR_EXTERNAL;
5331
5332         if (netif_carrier_ok(sp->dev)) {
5333                 info->speed = 10000;
5334                 info->duplex = DUPLEX_FULL;
5335         } else {
5336                 info->speed = -1;
5337                 info->duplex = -1;
5338         }
5339
5340         info->autoneg = AUTONEG_DISABLE;
5341         return 0;
5342 }
5343
5344 /**
5345  * s2io_ethtool_gdrvinfo - Returns driver specific information.
5346  * @sp : private member of the device structure, which is a pointer to the
5347  * s2io_nic structure.
5348  * @info : pointer to the structure with parameters given by ethtool to
5349  * return driver information.
5350  * Description:
5351  * Returns driver specefic information like name, version etc.. to ethtool.
5352  * Return value:
5353  *  void
5354  */
5355
5356 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5357                                   struct ethtool_drvinfo *info)
5358 {
5359         struct s2io_nic *sp = dev->priv;
5360
5361         strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
5362         strncpy(info->version, s2io_driver_version, sizeof(info->version));
5363         strncpy(info->fw_version, "", sizeof(info->fw_version));
5364         strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
5365         info->regdump_len = XENA_REG_SPACE;
5366         info->eedump_len = XENA_EEPROM_SPACE;
5367 }
5368
5369 /**
5370  *  s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer.
5371  *  @sp: private member of the device structure, which is a pointer to the
5372  *  s2io_nic structure.
5373  *  @regs : pointer to the structure with parameters given by ethtool for
5374  *  dumping the registers.
5375  *  @reg_space: The input argumnet into which all the registers are dumped.
5376  *  Description:
5377  *  Dumps the entire register space of xFrame NIC into the user given
5378  *  buffer area.
5379  * Return value :
5380  * void .
5381 */
5382
5383 static void s2io_ethtool_gregs(struct net_device *dev,
5384                                struct ethtool_regs *regs, void *space)
5385 {
5386         int i;
5387         u64 reg;
5388         u8 *reg_space = (u8 *) space;
5389         struct s2io_nic *sp = dev->priv;
5390
5391         regs->len = XENA_REG_SPACE;
5392         regs->version = sp->pdev->subsystem_device;
5393
5394         for (i = 0; i < regs->len; i += 8) {
5395                 reg = readq(sp->bar0 + i);
5396                 memcpy((reg_space + i), &reg, 8);
5397         }
5398 }
5399
/**
 *  s2io_phy_id  - timer function that alternates adapter LED.
 *  @data : address of the private member of the device structure, which
 *  is a pointer to the s2io_nic structure, provided as an u32.
 * Description: This is actually the timer function that alternates the
 * adapter LED bit of the adapter control bit to set/reset every time on
 * invocation. The timer is set for 1/2 a second, hence the NIC blinks
 *  once every second.
 */
static void s2io_phy_id(unsigned long data)
{
	struct s2io_nic *sp = (struct s2io_nic *) data;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = 0;
	u16 subid;

	subid = sp->pdev->subsystem_device;
	/* Xframe II boards, and boards whose low subsystem-id byte is
	 * >= 0x07, drive the LED via GPIO 0; others use the LED bit in
	 * adapter_control. */
	if ((sp->device_type == XFRAME_II_DEVICE) ||
		   ((subid & 0xFF) >= 0x07)) {
		/* Toggle the LED GPIO bit. */
		val64 = readq(&bar0->gpio_control);
		val64 ^= GPIO_CTRL_GPIO_0;
		writeq(val64, &bar0->gpio_control);
	} else {
		/* Toggle the LED bit in adapter_control. */
		val64 = readq(&bar0->adapter_control);
		val64 ^= ADAPTER_LED_ON;
		writeq(val64, &bar0->adapter_control);
	}

	/* Re-arm to flip the LED again in half a second. */
	mod_timer(&sp->id_timer, jiffies + HZ / 2);
}
5430
/**
 * s2io_ethtool_idnic - To physically identify the nic on the system.
 * @dev : pointer to the device structure.
 * @data : blink duration in seconds (0 selects the default duration).
 * Description: Used to physically identify the NIC on the system.
 * The Link LED will blink for a time specified by the user for
 * identification.
 * NOTE: The Link has to be Up to be able to blink the LED. Hence
 * identification is possible only if it's link is up.
 * Return value:
 * int , returns 0 on success
 */

static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
{
	u64 val64 = 0, last_gpio_ctrl_val;
	struct s2io_nic *sp = dev->priv;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u16 subid;

	subid = sp->pdev->subsystem_device;
	/* Remember the LED state so it can be restored afterwards. */
	last_gpio_ctrl_val = readq(&bar0->gpio_control);
	/* Old Xframe I boards (low subsystem-id byte < 0x07) blink via
	 * adapter_control, which requires the adapter to be enabled. */
	if ((sp->device_type == XFRAME_I_DEVICE) &&
		((subid & 0xFF) < 0x07)) {
		val64 = readq(&bar0->adapter_control);
		if (!(val64 & ADAPTER_CNTL_EN)) {
			printk(KERN_ERR
			       "Adapter Link down, cannot blink LED\n");
			return -EFAULT;
		}
	}
	/* Lazily initialize the blink timer on first use. */
	if (sp->id_timer.function == NULL) {
		init_timer(&sp->id_timer);
		sp->id_timer.function = s2io_phy_id;
		sp->id_timer.data = (unsigned long) sp;
	}
	mod_timer(&sp->id_timer, jiffies);
	/* Sleep while s2io_phy_id blinks the LED in the background.
	 * NOTE(review): data * HZ is passed to msleep_interruptible (which
	 * takes milliseconds) - exact seconds only when HZ == 1000; confirm
	 * intended units. */
	if (data)
		msleep_interruptible(data * HZ);
	else
		msleep_interruptible(MAX_FLICKER_TIME);
	del_timer_sync(&sp->id_timer);

	/* Restore the saved LED/GPIO state on boards whose link indicator
	 * would otherwise be left in the wrong state. */
	if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
		writeq(last_gpio_ctrl_val, &bar0->gpio_control);
		last_gpio_ctrl_val = readq(&bar0->gpio_control);
	}

	return 0;
}
5483
5484 static void s2io_ethtool_gringparam(struct net_device *dev,
5485                                     struct ethtool_ringparam *ering)
5486 {
5487         struct s2io_nic *sp = dev->priv;
5488         int i,tx_desc_count=0,rx_desc_count=0;
5489
5490         if (sp->rxd_mode == RXD_MODE_1)
5491                 ering->rx_max_pending = MAX_RX_DESC_1;
5492         else if (sp->rxd_mode == RXD_MODE_3B)
5493                 ering->rx_max_pending = MAX_RX_DESC_2;
5494
5495         ering->tx_max_pending = MAX_TX_DESC;
5496         for (i = 0 ; i < sp->config.tx_fifo_num ; i++)
5497                 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5498
5499         DBG_PRINT(INFO_DBG,"\nmax txds : %d\n",sp->config.max_txds);
5500         ering->tx_pending = tx_desc_count;
5501         rx_desc_count = 0;
5502         for (i = 0 ; i < sp->config.rx_ring_num ; i++)
5503                 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5504
5505         ering->rx_pending = rx_desc_count;
5506
5507         ering->rx_mini_max_pending = 0;
5508         ering->rx_mini_pending = 0;
5509         if(sp->rxd_mode == RXD_MODE_1)
5510                 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5511         else if (sp->rxd_mode == RXD_MODE_3B)
5512                 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5513         ering->rx_jumbo_pending = rx_desc_count;
5514 }
5515
5516 /**
5517  * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
5518  * @sp : private member of the device structure, which is a pointer to the
5519  *      s2io_nic structure.
5520  * @ep : pointer to the structure with pause parameters given by ethtool.
5521  * Description:
5522  * Returns the Pause frame generation and reception capability of the NIC.
5523  * Return value:
5524  *  void
5525  */
5526 static void s2io_ethtool_getpause_data(struct net_device *dev,
5527                                        struct ethtool_pauseparam *ep)
5528 {
5529         u64 val64;
5530         struct s2io_nic *sp = dev->priv;
5531         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5532
5533         val64 = readq(&bar0->rmac_pause_cfg);
5534         if (val64 & RMAC_PAUSE_GEN_ENABLE)
5535                 ep->tx_pause = TRUE;
5536         if (val64 & RMAC_PAUSE_RX_ENABLE)
5537                 ep->rx_pause = TRUE;
5538         ep->autoneg = FALSE;
5539 }
5540
5541 /**
5542  * s2io_ethtool_setpause_data -  set/reset pause frame generation.
5543  * @sp : private member of the device structure, which is a pointer to the
5544  *      s2io_nic structure.
5545  * @ep : pointer to the structure with pause parameters given by ethtool.
5546  * Description:
5547  * It can be used to set or reset Pause frame generation or reception
5548  * support of the NIC.
5549  * Return value:
5550  * int, returns 0 on Success
5551  */
5552
5553 static int s2io_ethtool_setpause_data(struct